diff --git a/.gitignore b/.gitignore
index ab3e8ce..1dbb238 100644
--- a/.gitignore
+++ b/.gitignore
@@ -162,3 +162,17 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
+
+
+# 3rdparty/
+# __MACOSX/
+# __pycache__/
+# results/
+# ./debug/
+# *.zip
+# .DS_Store
+# *.pyc
+# .idea/
+# ./install_dependencies.sh
+# ./install_sc2.sh
+
diff --git a/123.py b/123.py
new file mode 100644
index 0000000..ed0f110
--- /dev/null
+++ b/123.py
@@ -0,0 +1 @@
+print('hello')
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..afdfe50
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index dbc91e5..84a9951 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,257 @@
-# pymarl3
+# [pymarl3](https://github.com/tjuHaoXiaotian/pymarl3): the source code of the ICLR-2023 paper
+**[Boosting Multi-Agent Reinforcement Learning via Permutation Invariant and Permutation Equivariant Networks](https://openreview.net/pdf?id=OxNQXyZK-K8)**.
+
+We extend [**pymarl2** (https://github.com/hijkzzz/pymarl2)](https://github.com/hijkzzz/pymarl2) to **pymarl3**, adding support for the [SMAC-V2 environment](https://github.com/oxwhirl/smacv2) and equipping the MARL algorithms with the permutation invariance and permutation equivariance properties.
+
+## Key Features:
+* (1) **Support both [SMAC-V1](https://github.com/oxwhirl/smac) and [SMAC-V2](https://github.com/oxwhirl/smacv2)** (without needing to install each environment separately).
+* (2) Equip the MARL algorithms of [**pymarl2**](https://github.com/hijkzzz/pymarl2) with the **permutation invariance (PI) and permutation equivariance (PE)** properties. The proposed PI and PE model architectures **can be easily plugged into any existing MARL algorithms and boost their performance**.
+* (3) :rocket: **The enhanced algorithm achieves State-Of-The-Art (SOTA) performance on SMAC-V1 and SMAC-V2** (without restricting the agent field-of-view and shooting range to a cone).
+
+```
+[2023-07 update]: Added support for SMAC-V2.
+```
+
+## 1. Model Architecture of Hyper Policy Network (HPN)
+
+
+
+HPN incorporates [hypernetworks](https://arxiv.org/pdf/1609.09106) to generate different
+weights $W_i$ for different input
+components $x_i$ to improve representational capacity while ensuring that the
+same $x_i$ is always assigned the same
+weight $W_i$. The architecture of our HPN is shown in the above Figure (b). We
+take the individual Q-network $Q_i$ as an example. The model is mainly composed of two modules:
+
+**Permutation Invariant Input Layer.** [Hypernetworks](https://arxiv.org/pdf/1609.09106) are a family of neural
+architectures which use one network, known as the hypernetwork, to generate the weights for another network. In our setting,
+the hypernetwork is utilized to generate a different weight matrix $W_i$ for
+each component $x_i$ of the input set $X=\left[x_1,\ldots,x_m\right]$. As
+shown in the above Figure (b), $X$ (which can be viewed as a batch
+of $m$ components $x_i$, each of which is of
+dimension $k$, represented by different shades of blue) is first fed into a
+shared hypernetwork (marked in yellow), whose input size is $k$ and output size
+is $k \times h$. Then, the corresponding outputs are reshaped
+to $k \times h$ matrices and serve as the submodule
+weights $W_i$ of the normal FC layer (see Figure (a)). Note that
+different $x_i$ will generate
+different $W_i$ and the same $x_i$ will
+always correspond to the same $W_i$. Then,
+each $x_i$ is multiplied by its $W_i$ and all
+multiplication results and the bias $b$ are summed together to get the output.
+Since each element $x_i$ is processed separately by its
+corresponding $W_i$ and then merged by a permutation-invariant 'sum' function,
+permutation invariance is preserved.
+
+**Permutation Equivariance Output Layer.** Similarly, to keep the whole network permutation equivariant, the submodule
+weights and biases of the agent-related actions in the output layer,
+e.g., the attack-enemy actions of SMAC, are also generated by a
+hypernetwork. As mentioned above, the inputs $x_i$ and
+outputs $W_i$ of the hypernetwork always correspond one-to-one, so a change of the input order
+will result in the same change of the output order, thus achieving permutation equivariance.
+
+We emphasize that HPN is a general design and can be easily integrated into existing MARL algorithms (
+e.g., [VDN](https://arxiv.org/pdf/1706.05296?ref=https://githubhelp.com)
+, [QMIX](http://proceedings.mlr.press/v80/rashid18a/rashid18a.pdf)
+, [MADDPG](https://proceedings.neurips.cc/paper/2017/file/68a9750337a418a86fe06c1991a1d64c-Paper.pdf)
+, [MAPPO](https://arxiv.org/pdf/2103.01955?ref=https://githubhelp.com)) to boost the learning speed as well as the
+converged performance. All parameters of HPN are simply trained end-to-end with backpropagation according to the
+corresponding RL loss function.
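+
+Below is a minimal, self-contained PyTorch sketch of this idea (an illustration written for this README, not the
+repository's actual implementation; names such as `HPNBlock`, `feat_dim` and `hidden_dim` are ours): one hypernetwork
+produces a separate weight matrix for every input component, the per-component embeddings are merged by a
+permutation-invariant sum, and a second hypernetwork produces the per-component outputs (e.g. "attack enemy i") so
+that they permute together with the inputs.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class HPNBlock(nn.Module):
+    """Toy permutation-invariant input layer + permutation-equivariant output layer."""
+
+    def __init__(self, feat_dim: int, hidden_dim: int):
+        super().__init__()
+        # Input-side hypernetwork: x_i (feat_dim) -> W_i (feat_dim x hidden_dim).
+        self.hyper_w_in = nn.Linear(feat_dim, feat_dim * hidden_dim)
+        self.bias_in = nn.Parameter(torch.zeros(hidden_dim))
+        # Output-side hypernetwork: x_i -> w_i (hidden_dim), one output per component.
+        self.hyper_w_out = nn.Linear(feat_dim, hidden_dim)
+        self.hidden_dim = hidden_dim
+
+    def forward(self, x: torch.Tensor):
+        # x: [batch, m, feat_dim] -- a set of m input components.
+        b, m, k = x.shape
+        w_in = self.hyper_w_in(x).view(b, m, k, self.hidden_dim)     # a different W_i per x_i
+        h = torch.einsum("bmk,bmkh->bmh", x, w_in)                   # x_i @ W_i for every i
+        pooled = h.sum(dim=1) + self.bias_in                         # permutation-invariant merge
+        w_out = self.hyper_w_out(x)                                  # [b, m, hidden_dim]
+        q_per_component = torch.einsum("bh,bmh->bm", pooled, w_out)  # permutation-equivariant outputs
+        return pooled, q_per_component
+
+
+# Permuting the input set leaves `pooled` unchanged and permutes `q_per_component` identically.
+block = HPNBlock(feat_dim=5, hidden_dim=8)
+x = torch.randn(1, 4, 5)
+perm = torch.randperm(4)
+pooled_a, q_a = block(x)
+pooled_b, q_b = block(x[:, perm])
+assert torch.allclose(pooled_a, pooled_b, atol=1e-5)
+assert torch.allclose(q_a[:, perm], q_b, atol=1e-5)
+```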
+
+## 2. Experimental Results on SMAC-V1
+
+We mainly evaluate our methods on the challenging StarCraft II micromanagement
+benchmark [(SMAC)](https://github.com/oxwhirl/smac).
+
+****
+
+```
+StarCraft 2 version: SC2.4.10. difficulty: 7.
+```
+
+| Scenarios | Difficulty | HPN-QMIX |
+|----------------|:----------:|:----------------------------------:|
+| 8m_vs_9m | Hard | **100%** |
+| 5m_vs_6m | Hard | **100%** |
+| 3s_vs_5z | Hard | **100%** |
+| bane_vs_bane | Hard | **100%** |
+| 2c_vs_64zg | Hard | **100%** |
+| corridor | Super Hard | **100%** |
+| MMM2 | Super Hard | **100%** |
+| 3s5z_vs_3s6z | Super Hard |**100%** |
+| 27m_vs_30m | Super Hard | **100%** |
+| 6h_vs_8z | Super Hard | **98%** |
+
+### 2.1 Applying HPN to fine-tuned VDN and QMIX.
+
+
+
+### 2.2 Applying HPN to QPLEX and MAPPO.
+
+
+
+
+### 2.3 Comparison with baselines considering permutation invariance or permutation equivariance.
+
+
+
+### 2.4 Transfer results.
+
+Apart from achieving PI and PE, another benefit of HPN is that it can naturally handle variable numbers of inputs and
+outputs. Therefore, as also stated in the conclusion section, HPN can be potentially used to design more efficient
+multitask learning and transfer learning algorithms. For example, we can directly transfer the learned HPN policy in one
+task to new tasks with different numbers of agents and improve the learning efficiency in the new tasks. Transfer
+learning results of 5m → 12m, 5m_vs_6m → 8m_vs_10m, 3s_vs_3z → 3s_vs_5z are shown in the following figures. We see that
+the previously trained HPN policies can serve as better initialization policies for new tasks.
+
+
+## 3. Experimental Results on SMAC-V2
+
+### 3.1 Changes of SMAC-V2.
+
+SMAC-V2 makes three major changes to SMAC: randomising start positions, randomising unit types, and restricting the
+agent field-of-view and shooting range to a cone. The first two changes add randomness to challenge contemporary
+MARL algorithms. The third change makes features harder to infer and adds the challenge that agents must actively
+gather information (requiring more efficient exploration). **Since our goal is not to design more efficient
+exploration algorithms, we keep the field-of-view and attack range of the agents as a full circle, as in SMAC-V1.**
+
+* **Random Start Positions:** Random start positions come in two types. First, there is the `surrounded` type, where
+  the allied units are spawned in the middle of the map and surrounded by enemy units. This challenges the allied
+  units to fend off enemies approaching from multiple angles at once. Second, there are the `reflect_position`
+  scenarios, which randomly select positions for the allied units and then reflect those positions across the midpoint
+  of the map to obtain the enemy spawn positions. Examples are shown in the figure below.
+* **Random Unit Types:** Battles in SMAC-V2 do not always feature units of the same type each time, as they did in
+  SMAC. Instead, units are spawned randomly according to pre-fixed probabilities. Units in StarCraft II are split into
+  different races, and units from different races cannot be on the same team. For each of the three races (Protoss,
+  Terran, and Zerg), SMAC-V2 uses three unit types. Detailed generation probabilities are shown in the figure below. A
+  minimal sketch of these two randomisations is given right after this list.
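+
+The following short, self-contained Python sketch only illustrates the two randomisation mechanisms described above;
+it does not use the SMAC-V2 API or its real configuration keys, and the unit names and probabilities are example
+values.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+
+# Example values only: three Protoss unit types with illustrative spawn probabilities.
+UNIT_TYPES = ["stalker", "zealot", "colossus"]
+UNIT_PROBS = [0.45, 0.45, 0.10]
+
+
+def sample_team(n_agents: int):
+    """Draw a random team composition according to fixed per-type probabilities."""
+    return list(rng.choice(UNIT_TYPES, size=n_agents, p=UNIT_PROBS))
+
+
+def reflect_positions(ally_xy: np.ndarray, map_size=(32.0, 32.0)) -> np.ndarray:
+    """Mirror ally spawn positions through the map midpoint to place the enemy team."""
+    centre = np.asarray(map_size) / 2.0
+    return 2.0 * centre - ally_xy
+
+
+allies = rng.uniform(low=0.0, high=32.0, size=(5, 2))   # 5 random ally spawn points
+print(sample_team(5))
+print(reflect_positions(allies))
+```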
+
+### 3.2 Experimental Results.
+
+**Our HPN can naturally handle the two types of new challenges.** Thanks to the PI and PE properties, our HPN is more
+robust to the randomly changed start positions of the entities. Thanks to the entity-wise modeling and the use of a
+hypernetwork to generate a customized `weight matrix` for each type of unit, HPN can handle the randomly generated
+unit types as well. Comparisons of HPN-VDN with VDN on three difficult scenarios across the three races (Protoss,
+Terran, and Zerg) are shown in the figures below. The results show that our HPN significantly improves both the sample
+efficiency and the converged test win rates of the baseline VDN.
+
+
+## 4. How to use the code?
+
+### 4.1 Install this repository.
+```shell
+# Step-1: Clone the repository and enter the folder.
+git clone git@github.com:tjuHaoXiaotian/pymarl3.git
+cd pymarl3
+
+# Step-2: Install StarCraft II and add the custom maps.
+chmod +x install_sc2.sh
+./install_sc2.sh
+
+# Step-3: Install PyTorch and other Python packages.
+chmod +x install_dependencies.sh
+./install_dependencies.sh
+```
+
+### 4.2 Detailed command lines to reproduce all experimental results (on SMAC-V1).
+
+```shell
+# For SMAC, we take hpn_qmix, qmix, hpn_qplex and qplex on all hard and super-hard scenarios as examples.
+
+# 5m_vs_6m
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# 3s5z_vs_3s6z
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=3s5z_vs_3s6z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=4 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=3s5z_vs_3s6z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=4 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=3s5z_vs_3s6z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=4 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=3s5z_vs_3s6z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=4 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# 6h_vs_8z
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=6h_vs_8z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=500000 batch_size=128 td_lambda=0.3 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=6h_vs_8z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=500000 batch_size=128 td_lambda=0.3 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=6h_vs_8z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=500000 batch_size=128 td_lambda=0.3 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=6h_vs_8z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=500000 batch_size=128 td_lambda=0.3 hpn_head_num=2
+
+# 8m_vs_9m
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=8m_vs_9m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=8m_vs_9m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=8m_vs_9m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=8m_vs_9m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# 3s_vs_5z
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=3s_vs_5z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=3s_vs_5z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=3s_vs_5z obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 hpn_head_num=2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=3s_vs_5z obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 hpn_head_num=2
+
+# corridor
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=corridor obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=corridor obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=corridor obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=corridor obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# MMM2
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=MMM2 obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=MMM2 obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=MMM2 obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=MMM2 obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# 27m_vs_30m
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=27m_vs_30m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=27m_vs_30m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=27m_vs_30m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=27m_vs_30m obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# 2c_vs_64zg
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=2c_vs_64zg obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=2c_vs_64zg obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=2c_vs_64zg obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=2c_vs_64zg obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+# bane_vs_bane
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qmix --env-config=sc2 with env_args.map_name=bane_vs_bane obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qmix --env-config=sc2 with env_args.map_name=bane_vs_bane obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_qplex --env-config=sc2 with env_args.map_name=bane_vs_bane obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=qplex --env-config=sc2 with env_args.map_name=bane_vs_bane obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+```
+
+### 4.3 Detailed command lines to reproduce the experimental results (on SMAC-V2).
+
+```shell
+#%%%%%%%%%%%%%%%%%%% sc2_v2_terran %%%%%%%%%%%%%%%%%%%%%
+CUDA_VISIBLE_DEVICES="1" python src/main.py --config=hpn_vdn --env-config=sc2_v2_terran with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=vdn --env-config=sc2_v2_terran with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+#%%%%%%%%%%%%%%%%%%% sc2_v2_protoss %%%%%%%%%%%%%%%%%%%%%
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=hpn_vdn --env-config=sc2_v2_protoss with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+CUDA_VISIBLE_DEVICES="1" python src/main.py --config=vdn --env-config=sc2_v2_protoss with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+#%%%%%%%%%%%%%%%%%%% sc2_v2_zerg %%%%%%%%%%%%%%%%%%%%%
+CUDA_VISIBLE_DEVICES="1" python src/main.py --config=hpn_vdn --env-config=sc2_v2_zerg with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=vdn --env-config=sc2_v2_zerg with obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6 mixer=vdn
+
+```
+
+## 5. Citation
+
+```text
+@inproceedings{hao2022api,
+  title={Boosting Multi-Agent Reinforcement Learning via Permutation Invariant and Permutation Equivariant Networks},
+  author={Hao, Xiaotian and Hao, Jianye and Mao, Hangyu and Wang, Weixun and Yang, Yaodong and Li, Dong and Zheng, Yan and Wang, Zhen},
+  booktitle={The Eleventh International Conference on Learning Representations},
+  year={2023}
+}
+```
+
+
+
diff --git a/doc/README(old-version).md b/doc/README(old-version).md
new file mode 100644
index 0000000..9588010
--- /dev/null
+++ b/doc/README(old-version).md
@@ -0,0 +1,129 @@
+# API: Boosting Multi-Agent Reinforcement Learning via Agent-Permutation-Invariant Networks
+
+Open-source code for [API: Boosting Multi-Agent Reinforcement Learning via Agent-Permutation-Invariant Networks](https://arxiv.org/abs/xxxxx).
+
+[TOC]
+
+## 1. Motivation
+
+### 1.1 Permutation Invariance and Equivariance
+
+
+
+**Permutation Invariant Function.** A function $f: \mathcal{X} \rightarrow \mathcal{Y}$, whose input $X=\left[x_1,x_2,\ldots,x_m\right]^\mathsf{T} \in \mathcal{X}$ (of size $m \times k$) is a set consisting of $m$ components (each of which is of dimension $k$), is said to be permutation invariant if permutation of the input components does not change the output of the function. Mathematically, $f\left(\left[x_1,x_2,\ldots,x_m\right]^\mathsf{T}\right)=f\left(M\left[x_1,x_2,\ldots,x_m\right]^\mathsf{T}\right)$, where $M$ is a permutation matrix of size $m \times m$, which is a binary matrix that has exactly a single unit value in every row and column and zeros everywhere else.
+
+**Permutation Equivariant Function.** Similarly, a function $f$ is permutation equivariant if permutation of the input components permutes the output components with the same permutation $M$. Mathematically, $f\left(M\left[x_1,x_2,\ldots,x_m\right]^\mathsf{T}\right)=M\left[y_1,y_2,\ldots,y_m\right]^\mathsf{T}$, where $\left[y_1,y_2,\ldots,y_m\right]^\mathsf{T}=f\left(\left[x_1,x_2,\ldots,x_m\right]^\mathsf{T}\right)$.
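+
+As a quick sanity check of the two definitions (an illustrative snippet written for this document, not code from the
+repository): sum-pooling over the set is permutation invariant, while applying the same map to every component is
+permutation equivariant.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+X = rng.standard_normal((5, 3))      # m = 5 components, each of dimension k = 3
+perm = rng.permutation(5)
+MX = X[perm]                         # M [x_1, ..., x_m]^T
+W = rng.standard_normal((3, 3))
+
+f_inv = lambda X: X.sum(axis=0)      # permutation invariant: f(MX) == f(X)
+f_eqv = lambda X: np.tanh(X @ W)     # per-component map: f(MX) == M f(X)
+
+assert np.allclose(f_inv(X), f_inv(MX))
+assert np.allclose(f_eqv(X)[perm], f_eqv(MX))
+```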
+
+### 1.2 Why Does Permutation Invariance Matter?
+
+In MARL, the environments typically consist of $m$ components, including $n$ learning agents and $m-n$ non-player characters. Therefore, the states and observations are factorizable as sets of $m$ components $\left[x_1,x_2,\ldots,x_m\right]$, where each component $x_i$ represents an atomic semantic meaning (e.g., agent $i$'s features) whose dimension is $k$. Because shuffling the order of the $m$ components does not change the information of the set, one would expect many functions, e.g., the policy function $\pi_i\left(a_i \mid o_i\right)$, to possess permutation invariance and permutation equivariance. These properties can be exploited to design more efficient MARL algorithms, especially when the $m$ components are homogeneous, i.e., semantically identical (belonging to the same type, having identical feature spaces, action spaces and reward functions).
+
+Taking the individual Q-network $Q_i\left(o_i,\cdot\right)$ as an example, the input is the observation $o_i=\left[x_1,x_2,\ldots,x_m\right]$ and the outputs are the Q-values of all actions in $A_i$. Since the $m$ components are homogeneous, they have the same feature space, i.e., $x_j \in \mathcal{X}$ for all $j$. Thus, the size of a **fixedly ordered representation** of $o_i$ is $|\mathcal{X}|^m$. In contrast, using a **permutation invariant representation**, i.e., removing the influence of the input order, could reduce the size of the observation space by a factor of $\frac{1}{m!}$. As the number of homogeneous components increases, the removal of these redundancies results in a much smaller search space, upon which we could more easily learn a policy.
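+
+As a back-of-the-envelope illustration of this reduction (the numbers below are purely illustrative): with $m$
+homogeneous components an ordered representation distinguishes $|\mathcal{X}|^m$ inputs, whereas a
+permutation-invariant (multiset) representation only needs $\binom{|\mathcal{X}|+m-1}{m}$ of them, which is roughly a
+factor of $m!$ fewer when $|\mathcal{X}|$ is much larger than $m$.
+
+```python
+import math
+
+m, n_features = 5, 1000                        # e.g. 5 homogeneous allies, 1000 distinguishable feature vectors
+ordered = n_features ** m                      # fixedly ordered representation
+unordered = math.comb(n_features + m - 1, m)   # permutation-invariant (multiset) representation
+print(ordered / unordered, math.factorial(m))  # ~119 vs. 120 = m!
+```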
+
+Our objective is to design more flexible **Agent Permutation Invariant** (**API**) and **Agent Permutation Equivariant** (**APE**) models to greatly reduce the sample complexity of MARL algorithms. Again taking $Q_i\left(o_i,\cdot\right)$ as the example, if there is a direct correspondence between an action's Q-value in the output and a component $x_j$ in the input $o_i$, then $Q_i$ should be permutation equivariant for these actions; otherwise, $Q_i$ should be permutation invariant.
+
+
+
+Note that this is very common in many multi-agent settings. For example, as illustrated in the above Figure, in the challenging [StarCraft II micromanagement benchmark (SMAC)](https://github.com/oxwhirl/smac), the input set $o_i$ could be divided into 2 groups: an ally group $X^{\text{ally}}$ and an enemy group $X^{\text{enemy}}$. The output Q-values of the actions could be divided into 2 groups as well: Q-values of the move actions and Q-values of the attack actions (one per enemy). Since there is a one-to-one correspondence between the attack actions and the elements of $X^{\text{enemy}}$, the Q-values of the attack actions should be equivariant to permutations of $X^{\text{enemy}}$, while the Q-values of the move actions should be invariant to permutations of the whole input set. Overall, a desired model of $Q_i\left(o_i,\cdot\right)$ should be both permutation invariant and permutation equivariant.
+
+
+
+## 2. Model Architecture of API-HyPerNetwork (API-HPN)
+
+
+
+API-HPN incorporates [hypernetworks](https://arxiv.org/pdf/1609.09106) to generate different weights $W_i$ for different input components $x_i$ to improve representational capacity while ensuring that the same $x_i$ is always assigned the same weight $W_i$. The architecture of our API-HPN is shown in the above Figure (b). We again take $Q_i\left(o_i,\cdot\right)$ as an example. The model is mainly composed of two modules:
+
+**Agent Permutation Invariant Input Layer.** [Hypernetworks](https://arxiv.org/pdf/1609.09106) are a family of neural architectures which use one network, known as the hypernetwork, to generate the weights for another network. In our setting, the hypernetwork is utilized to generate a different weight matrix $W_i$ for each component $x_i$ of the input set $X=\left[x_1,\ldots,x_m\right]$. As shown in the above Figure (b), $X$ (which can be viewed as a batch of $m$ components $x_i$, each of which is of dimension $k$, represented by different shades of blue) is first fed into a shared hypernetwork (marked in yellow), whose input size is $k$ and output size is $k \times h$. Then, the corresponding outputs are reshaped to $k \times h$ matrices and serve as the submodule weights $W_i$ of the normal FC layer (see Figure (a)). Note that different $x_i$ will generate different $W_i$ and the same $x_i$ will always correspond to the same $W_i$. Then, each $x_i$ is multiplied by its $W_i$ and all multiplication results and the bias $b$ are summed together to get the output. Since each element $x_i$ is processed separately by its corresponding $W_i$ and then merged by a permutation-invariant 'sum' function, permutation invariance is preserved.
+
+**Agent Permutation Equivariance Output Layer.** Similarly, to keep the whole network permutation equivariant, the submodule weights and biases of the agent-related actions in the output layer, e.g., the attack-enemy actions of SMAC, are also generated by a hypernetwork. As mentioned above, the inputs $x_i$ and outputs $W_i$ of the hypernetwork always correspond one-to-one, so a change of the input order will result in the same change of the output order, thus achieving permutation equivariance.
+
+We emphasize that API-HPN is a general design and can be easily integrated into existing MARL algorithms (e.g., [VDN](https://arxiv.org/pdf/1706.05296?ref=https://githubhelp.com), [QMIX](http://proceedings.mlr.press/v80/rashid18a/rashid18a.pdf), [MADDPG](https://proceedings.neurips.cc/paper/2017/file/68a9750337a418a86fe06c1991a1d64c-Paper.pdf), [MAPPO](https://arxiv.org/pdf/2103.01955?ref=https://githubhelp.com)) to boost the learning speed as well as the converged performance. All parameters of API-HPN are simply trained end-to-end with backpropagation according to the corresponding RL loss function.
+
+
+
+## 3. Experiments
+
+### 3.1 Experimental Setups
+
+We mainly evaluate our methods in the challenging StarCraft II micromanagement benchmark [(SMAC)](https://github.com/oxwhirl/smac).
+
+****
+
+```
+StarCraft 2 version: SC2.4.10. difficulty: 7.
+```
+### 3.2 Evaluation Metric
+
+### 3.3 Code Implementations and Structure
+
+### 3.4 Results
+
+#### 3.4.1 Comparison with previous SOTA
+
+
+
+#### 3.4.2 Comparison with baselines considering the permutation invariance and permutation equivariance properties
+
+
+
+#### 3.4.3 Ablation Studies
+
+
+
+
+
+
+| Scenarios | Difficulty | API-QMIX |
+|----------------|:----------:|:----------------------------------:|
+| 8m_vs_9m | Hard | **100%** |
+| 5m_vs_6m | Hard | **100%** |
+| 3s_vs_5z | Hard | **100%** |
+| bane_vs_bane | Hard | **100%** |
+| 2c_vs_64zg | Hard | **100%** |
+| corridor | Super Hard | **100%** |
+| MMM2 | Super Hard | **100%** |
+| 3s5z_vs_3s6z | Super Hard |**100%** |
+| 27m_vs_30m | Super Hard | **100%** |
+| 6h_vs_8z | Super Hard | **98%** |
+
+
+
+## 4. How to use the code?
+
+### 4.1 Detailed command lines to reproduce all experimental results
+
+
+
+**Run an experiment**
+
+```shell
+# For SMAC, take the 5m_vs_6m scenario for example.
+CUDA_VISIBLE_DEVICES="0" python src/main.py --config=api_vdn --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+
+CUDA_VISIBLE_DEVICES="1" python src/main.py --config=api_qmix --env-config=sc2 with env_args.map_name=5m_vs_6m obs_agent_id=True obs_last_action=False runner=parallel batch_size_run=8 buffer_size=5000 t_max=10050000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
+```
+
+
+
+
+The config files act as defaults for an algorithm or environment.
+
+They are all located in `src/config`.
+`--config` refers to the config files in `src/config/algs`.
+
+`--env-config` refers to the config files in `src/config/envs`.
+
+
+
+# Citation
+```
+@article{,
+ title={API: Boosting Multi-Agent Reinforcement Learning via Agent-Permutation-Invariant Networks},
+ author={},
+ year={2022},
+ eprint={},
+ archivePrefix={arXiv},
+ primaryClass={cs.LG}
+}
+```
+
diff --git a/doc/figure/API-HPN.png b/doc/figure/API-HPN.png
new file mode 100644
index 0000000..ce1f7dd
Binary files /dev/null and b/doc/figure/API-HPN.png differ
diff --git a/doc/figure/API_APE_function.png b/doc/figure/API_APE_function.png
new file mode 100644
index 0000000..f220f85
Binary files /dev/null and b/doc/figure/API_APE_function.png differ
diff --git a/doc/figure/HPN-QPLEX.png b/doc/figure/HPN-QPLEX.png
new file mode 100644
index 0000000..560bfcf
Binary files /dev/null and b/doc/figure/HPN-QPLEX.png differ
diff --git a/doc/figure/HPN-mappo.png b/doc/figure/HPN-mappo.png
new file mode 100644
index 0000000..feda2d2
Binary files /dev/null and b/doc/figure/HPN-mappo.png differ
diff --git a/doc/figure/MA_transfer.png b/doc/figure/MA_transfer.png
new file mode 100644
index 0000000..70b1f52
Binary files /dev/null and b/doc/figure/MA_transfer.png differ
diff --git a/doc/figure/exp_ablation.png b/doc/figure/exp_ablation.png
new file mode 100644
index 0000000..8a71f73
Binary files /dev/null and b/doc/figure/exp_ablation.png differ
diff --git a/doc/figure/exp_comparison_with_SOTA.png b/doc/figure/exp_comparison_with_SOTA.png
new file mode 100644
index 0000000..b45c7fe
Binary files /dev/null and b/doc/figure/exp_comparison_with_SOTA.png differ
diff --git a/doc/figure/exp_comparison_with_SOTA2.png b/doc/figure/exp_comparison_with_SOTA2.png
new file mode 100644
index 0000000..7961074
Binary files /dev/null and b/doc/figure/exp_comparison_with_SOTA2.png differ
diff --git a/doc/figure/exp_comparison_with_baselines.png b/doc/figure/exp_comparison_with_baselines.png
new file mode 100644
index 0000000..b1d4b09
Binary files /dev/null and b/doc/figure/exp_comparison_with_baselines.png differ
diff --git a/doc/figure/smac_v2_config.png b/doc/figure/smac_v2_config.png
new file mode 100644
index 0000000..6644785
Binary files /dev/null and b/doc/figure/smac_v2_config.png differ
diff --git a/doc/figure/smac_v2_random_start_positions.png b/doc/figure/smac_v2_random_start_positions.png
new file mode 100644
index 0000000..3011a69
Binary files /dev/null and b/doc/figure/smac_v2_random_start_positions.png differ
diff --git a/doc/figure/smac_v2_random_unit_types.png b/doc/figure/smac_v2_random_unit_types.png
new file mode 100644
index 0000000..f90228e
Binary files /dev/null and b/doc/figure/smac_v2_random_unit_types.png differ
diff --git a/doc/figure/smac_v2_results.png b/doc/figure/smac_v2_results.png
new file mode 100644
index 0000000..15d710d
Binary files /dev/null and b/doc/figure/smac_v2_results.png differ
diff --git a/install_dependencies.sh b/install_dependencies.sh
new file mode 100644
index 0000000..e29c33a
--- /dev/null
+++ b/install_dependencies.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Install PyTorch and Python Packages
+# 3. Install Python dependencies
+echo 'Install PyTorch and Python dependencies...'
+# conda create -n pymarl python=3.8 -y
+# conda activate pymarl
+
+conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c nvidia -y
+pip install sacred numpy scipy gym==0.10.8 matplotlib seaborn \
+ pyyaml==5.3.1 pygame pytest probscale imageio snakeviz tensorboard-logger
+
+# pip install git+https://github.com/oxwhirl/smac.git
+# No need to install SMAC separately anymore. We have integrated SMAC-V1 and SMAC-V2 in pymarl3/envs.
+pip install "protobuf<3.21"
+pip install "pysc2>=3.0.0"
+pip install "s2clientprotocol>=4.10.1.75800.0"
+pip install "absl-py>=0.1.0"
diff --git a/install_sc2.sh b/install_sc2.sh
new file mode 100644
index 0000000..0e0eb99
--- /dev/null
+++ b/install_sc2.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Install SC2 and add the custom maps
+
+# Clone the source code.
+#git clone git@github.com:tjuHaoXiaotian/pymarl3.git
+export PYMARL3_CODE_DIR=$(pwd)
+
+# 1. Install StarCraftII
+echo 'Install StarCraftII...'
+cd "$HOME"
+export SC2PATH="$HOME/StarCraftII"
+echo 'SC2PATH is set to '$SC2PATH
+if [ ! -d $SC2PATH ]; then
+ echo 'StarCraftII is not installed. Installing now ...';
+ wget http://blzdistsc2-a.akamaihd.net/Linux/SC2.4.10.zip
+ unzip -P iagreetotheeula SC2.4.10.zip
+else
+ echo 'StarCraftII is already installed.'
+fi
+
+# 2. Install the custom maps
+
+# Copy the maps to the target dir.
+echo 'Install SMACV1 and SMACV2 maps...'
+MAP_DIR="$SC2PATH/Maps/"
+if [ ! -d "$MAP_DIR/SMAC_Maps" ]; then
+ echo 'MAP_DIR is set to '$MAP_DIR
+ if [ ! -d $MAP_DIR ]; then
+ mkdir -p $MAP_DIR
+ fi
+ cp -r "$PYMARL3_CODE_DIR/src/envs/smac_v2/official/maps/SMAC_Maps" $MAP_DIR
+else
+ echo 'SMACV1 and SMACV2 maps are already installed.'
+fi
+echo 'StarCraft II and SMAC maps are installed.'
\ No newline at end of file
diff --git a/obs_config.json b/obs_config.json
new file mode 100644
index 0000000..193a782
--- /dev/null
+++ b/obs_config.json
@@ -0,0 +1,329 @@
+{
+ "2c_vs_64zg_obs" : {
+ "model_input_size": 404,
+ "env_obs_size": 332,
+ "n_agent": 2,
+ "n_enemy": 64,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [64, 5]},
+ "2": {"name": "ally_feats", "size": [1, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 70},
+ "5": {"name": "agent_id", "size": 2}
+ }
+ },
+
+ "2m_vs_1z_obs" : {
+ "model_input_size": 25,
+ "env_obs_size": 16,
+ "n_agent": 2,
+ "n_enemy": 1,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [1, 6]},
+ "2": {"name": "ally_feats", "size": [1, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 7},
+ "5": {"name": "agent_id", "size": 2}
+ }
+ },
+
+ "2s_vs_1sc_obs" : {
+ "model_input_size": 26,
+ "env_obs_size": 17,
+ "n_agent": 2,
+ "n_enemy": 1,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [1, 5]},
+ "2": {"name": "ally_feats", "size": [1, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 7},
+ "5": {"name": "agent_id", "size": 2}
+ }
+ },
+
+ "2s3z_obs" : {
+ "model_input_size": 96,
+ "env_obs_size": 80,
+ "n_agent": 5,
+ "n_enemy": 5,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [5, 8]},
+ "2": {"name": "ally_feats", "size": [4, 8]},
+ "3": {"name": "own_feats", "size": 4},
+ "4": {"name": "last_action", "size": 11},
+ "5": {"name": "agent_id", "size": 5}
+ }
+ },
+
+ "3m_obs" : {
+ "model_input_size": 42,
+ "env_obs_size": 30,
+ "n_agent": 3,
+ "n_enemy": 3,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [3, 5]},
+ "2": {"name": "ally_feats", "size": [2, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 9},
+ "5": {"name": "agent_id", "size": 3}
+ }
+ },
+
+ "3s_vs_3z_obs" : {
+ "model_input_size": 48,
+ "env_obs_size": 36,
+ "n_agent": 3,
+ "n_enemy": 3,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [3, 6]},
+ "2": {"name": "ally_feats", "size": [2, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 9},
+ "5": {"name": "agent_id", "size": 3}
+ }
+ },
+
+ "3s_vs_4z_obs" : {
+ "model_input_size": 55,
+ "env_obs_size": 42,
+ "n_agent": 3,
+ "n_enemy": 4,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [4, 6]},
+ "2": {"name": "ally_feats", "size": [2, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 10},
+ "5": {"name": "agent_id", "size": 3}
+ }
+ },
+
+ "3s_vs_5z_obs" : {
+ "model_input_size": 62,
+ "env_obs_size": 48,
+ "n_agent": 3,
+ "n_enemy": 5,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [5, 6]},
+ "2": {"name": "ally_feats", "size": [2, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 11},
+ "5": {"name": "agent_id", "size": 3}
+ }
+ },
+
+ "3s5z_obs" : {
+ "model_input_size": 150,
+ "env_obs_size": 128,
+ "n_agent": 8,
+ "n_enemy": 8,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [8, 8]},
+ "2": {"name": "ally_feats", "size": [7, 8]},
+ "3": {"name": "own_feats", "size": 4},
+ "4": {"name": "last_action", "size": 14},
+ "5": {"name": "agent_id", "size": 8}
+ }
+ },
+
+ "3s5z_vs_3s6z_obs" : {
+ "model_input_size": 159,
+ "env_obs_size": 136,
+ "n_agent": 8,
+ "n_enemy": 9,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [9, 8]},
+ "2": {"name": "ally_feats", "size": [7, 8]},
+ "3": {"name": "own_feats", "size": 4},
+ "4": {"name": "last_action", "size": 15},
+ "5": {"name": "agent_id", "size": 8}
+ }
+ },
+
+ "5m_vs_6m_obs" : {
+ "model_input_size": 72,
+ "env_obs_size": 55,
+ "n_agent": 5, "n_enemy": 6,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [6, 5]},
+ "2": {"name": "ally_feats", "size": [4, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 12},
+ "5": {"name": "agent_id", "size": 5}}
+ },
+
+ "6h_vs_8z_obs" : {
+ "model_input_size": 98,
+ "env_obs_size": 78,
+ "n_agent": 6,
+ "n_enemy": 8,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [8, 6]},
+ "2": {"name": "ally_feats", "size": [5, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 14},
+ "5": {"name": "agent_id", "size": 6}
+ }
+ },
+
+ "8m_obs" : {
+ "model_input_size": 102,
+ "env_obs_size": 80,
+ "n_agent": 8,
+ "n_enemy": 8,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [8, 5]},
+ "2": {"name": "ally_feats", "size": [7, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 14},
+ "5": {"name": "agent_id", "size": 8}
+ }
+ },
+
+ "8m_vs_9m_obs" : {
+ "model_input_size": 108,
+ "env_obs_size": 85,
+ "n_agent": 8,
+ "n_enemy": 9,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [9, 5]},
+ "2": {"name": "ally_feats", "size": [7, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 15},
+ "5": {"name": "agent_id", "size": 8}
+ }
+ },
+
+ "10m_vs_11m_obs" : {
+ "model_input_size": 132,
+ "env_obs_size": 105,
+ "n_agent": 10,
+ "n_enemy": 11,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [11, 5]},
+ "2": {"name": "ally_feats", "size": [9, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 17},
+ "5": {"name": "agent_id", "size": 10}
+ }
+ },
+
+ "25m_obs" : {
+ "model_input_size": 306,
+ "env_obs_size": 250,
+ "n_agent": 25,
+ "n_enemy": 25,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [25, 5]},
+ "2": {"name": "ally_feats", "size": [24, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 31},
+ "5": {"name": "agent_id", "size": 25}
+ }
+ },
+
+ "27m_vs_30m_obs" : {
+ "model_input_size": 348,
+ "env_obs_size": 285,
+ "n_agent": 27,
+ "n_enemy": 30,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [30, 5]},
+ "2": {"name": "ally_feats", "size": [26, 5]},
+ "3": {"name": "own_feats", "size": 1},
+ "4": {"name": "last_action", "size": 36},
+ "5": {"name": "agent_id", "size": 27}
+ }
+ },
+
+ "bane_vs_bane_obs" : {
+ "model_input_size": 390,
+ "env_obs_size": 336,
+ "n_agent": 24,
+ "n_enemy": 24,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [24, 7]},
+ "2": {"name": "ally_feats", "size": [23, 7]},
+ "3": {"name": "own_feats", "size": 3},
+ "4": {"name": "last_action", "size": 30},
+ "5": {"name": "agent_id", "size": 24}
+ }
+ },
+
+ "corridor_obs" : {
+ "model_input_size": 192,
+ "env_obs_size": 156,
+ "n_agent": 6,
+ "n_enemy": 24,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [24, 5]},
+ "2": {"name": "ally_feats", "size": [5, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 30},
+ "5": {"name": "agent_id", "size": 6}
+ }
+ },
+
+ "MMM_obs" : {
+ "model_input_size": 186,
+ "env_obs_size": 160,
+ "n_agent": 10,
+ "n_enemy": 10,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [10, 8]},
+ "2": {"name": "ally_feats", "size": [9, 8]},
+ "3": {"name": "own_feats", "size": 4},
+ "4": {"name": "last_action", "size": 16},
+ "5": {"name": "agent_id", "size": 10}
+ }
+ },
+
+ "MMM2_obs" : {
+ "model_input_size": 204,
+ "env_obs_size": 176,
+ "n_agent": 10,
+ "n_enemy": 12,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [12, 8]},
+ "2": {"name": "ally_feats", "size": [9, 8]},
+ "3": {"name": "own_feats", "size": 4},
+ "4": {"name": "last_action", "size": 18},
+ "5": {"name": "agent_id", "size": 10}
+ }
+ },
+
+ "so_many_baneling_obs" : {
+ "model_input_size": 247,
+ "env_obs_size": 202,
+ "n_agent": 7,
+ "n_enemy": 32,
+ "model_input_compose": {
+ "0": {"name": "move_feats", "size": 4},
+ "1": {"name": "enemy_feats", "size": [32, 5]},
+ "2": {"name": "ally_feats", "size": [6, 6]},
+ "3": {"name": "own_feats", "size": 2},
+ "4": {"name": "last_action", "size": 38},
+ "5": {"name": "agent_id", "size": 7}
+ }
+ }
+}
\ No newline at end of file
diff --git a/plotsmac.py b/plotsmac.py
new file mode 100644
index 0000000..c8a4234
--- /dev/null
+++ b/plotsmac.py
@@ -0,0 +1,117 @@
+import json
+import matplotlib.pyplot as plt
+import numpy as np
+import tkinter as tk
+from tkinter import ttk
+from scipy.signal import savgol_filter
+
+# 讀取多個 JSON 檔案
+def load_data(file_paths):
+ all_data = []
+ for file_path in file_paths:
+ with open(file_path) as f:
+ data = json.load(f)
+ # 處理特殊的數據格式(numpy.float64 對象)
+ processed_data = {}
+ for key, value in data.items():
+ if isinstance(value, list):
+ # 檢查是否包含字典格式的數值
+ if value and isinstance(value[0], dict) and 'value' in value[0]:
+ processed_data[key] = [item['value'] for item in value]
+ else:
+ processed_data[key] = value
+ else:
+ processed_data[key] = value
+ all_data.append(processed_data)
+ return all_data
+
+def smooth(y, window_length=51, polyorder=3):
+ return savgol_filter(y, window_length, polyorder)
+
+# 定義要繪製的數據
+def plot_data(data_list, keys, name_list, battle_name, smooth_window=2):
+ for key in keys:
+ # 創建一個圖形
+ fig, ax = plt.subplots()
+ # 設置坐標軸背景顏色
+ ax.set_facecolor('lightyellow')
+ # 設置網格顏色
+ ax.grid(color='green', linestyle='--', linewidth=0.5)
+
+ for data, name in zip(data_list, name_list):
+ if key in data:
+ x = data[key + '_T']
+ y = data[key]
+ y_smooth = smooth(y)
+ ax.plot(x, y_smooth, label=name)
+ #ax.plot(data[key + '_T'], data[key], label=name)
+ ax.set_xlabel('Time Steps')
+ ax.set_ylabel(key)
+ ax.set_title(battle_name)
+ ax.legend()
+
+ plt.show()
+
+def create_dynamic_window(data_list, keys, name_list, battle_name):
+ root = tk.Tk()
+    root.title("Select data to display")
+
+    # Create the selection panel on the left
+    select_frame = ttk.Frame(root)
+    select_frame.pack(side=tk.LEFT, fill=tk.Y, padx=10, pady=5)
+
+    # Variables storing each checkbox's state
+    vars = []
+
+    def update_plot(*args):
+        # Collect the runs whose checkboxes are ticked
+        selected_names = [name for name, var in zip(name_list, vars) if var.get()]
+        filtered_data = [data for data, name in zip(data_list, name_list) if name in selected_names]
+        filtered_names = [name for name in name_list if name in selected_names]
+
+        # Close all existing figures
+        plt.close('all')
+        # Redraw the plots
+        if filtered_data:  # make sure at least one run is selected
+            plot_data(filtered_data, keys, filtered_names, battle_name)
+
+    # Create one checkbox per run
+    for name in name_list:
+        var = tk.BooleanVar(value=True)  # all runs selected by default
+        var.trace('w', update_plot)  # redraw the plots whenever a checkbox changes
+        vars.append(var)
+        cb = ttk.Checkbutton(select_frame, text=name, variable=var)
+        cb.pack(anchor='w', padx=5, pady=2)
+
+    # Add select-all / deselect-all buttons
+ def select_all():
+ for var in vars:
+ var.set(True)
+
+ def deselect_all():
+ for var in vars:
+ var.set(False)
+
+    ttk.Button(select_frame, text="Select all", command=select_all).pack(pady=5)
+    ttk.Button(select_frame, text="Deselect all", command=deselect_all).pack(pady=5)
+
+    # Initial draw
+ update_plot()
+
+ root.mainloop()
+
+# Result files selected by the user for plotting
+file_paths = ['results/sacred/10gen_protoss/feudal/5/info.json',
+ 'results/sacred/10gen_protoss/qmix/6/info.json',
+ ]
+
+data_list = load_data(file_paths)
+selected_keys = ['test_battle_won_mean', 'return_mean', 'worker_loss', 'loss_td', 'manager_loss']
+#selected_keys = ['battle_won_mean', 'loss']
+name_list = ['feudal', 'qmix']
+
+
+battle_name = '5protoss'
+
+# Launch the dynamic selection window
+create_dynamic_window(data_list, selected_keys, name_list, battle_name)
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_agent.torch
new file mode 100644
index 0000000..367dbae
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_opt.torch
new file mode 100644
index 0000000..675b11e
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/manager_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/mixer.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/mixer.torch
new file mode 100644
index 0000000..84e92f2
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/mixer.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_agent.torch
new file mode 100644
index 0000000..313f614
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_opt.torch
new file mode 100644
index 0000000..d9147e4
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/2000120/worker_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_agent.torch
new file mode 100644
index 0000000..5bf111c
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_opt.torch
new file mode 100644
index 0000000..d01a7ce
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/manager_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/mixer.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/mixer.torch
new file mode 100644
index 0000000..b50ee33
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/mixer.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_agent.torch
new file mode 100644
index 0000000..bc0dce5
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_opt.torch
new file mode 100644
index 0000000..aacc888
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4000192/worker_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_agent.torch
new file mode 100644
index 0000000..95bb5ab
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_opt.torch
new file mode 100644
index 0000000..7bdcf52
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/manager_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/mixer.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/mixer.torch
new file mode 100644
index 0000000..576cc31
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/mixer.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_agent.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_agent.torch
new file mode 100644
index 0000000..fdf4696
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_agent.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_opt.torch b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_opt.torch
new file mode 100644
index 0000000..bf1697d
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=feudal-agent=feudal/env_n=4/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/feudal__2025-01-06_04-17-39/4050105/worker_opt.torch differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/agent.th b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/agent.th
new file mode 100644
index 0000000..33cd45b
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/agent.th differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/mixer.th b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/mixer.th
new file mode 100644
index 0000000..baac471
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/mixer.th differ
diff --git a/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/opt.th b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/opt.th
new file mode 100644
index 0000000..3f55533
Binary files /dev/null and b/results/models/sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2025-01-06_00-24-03/2000205/opt.th differ
diff --git a/results/sacred/10gen_protoss/feudal/1/config.json b/results/sacred/10gen_protoss/feudal/1/config.json
new file mode 100644
index 0000000..a4c4ded
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/1/config.json
@@ -0,0 +1,131 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 618959174,
+ "state_dim": 80,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": false,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/1/cout.txt b/results/sacred/10gen_protoss/feudal/1/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/feudal/1/info.json b/results/sacred/10gen_protoss/feudal/1/info.json
new file mode 100644
index 0000000..f7b6ca2
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/1/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 216
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 216
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 216
+ ],
+ "ep_length_mean": [
+ 54.0
+ ],
+ "ep_length_mean_T": [
+ 216
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 216
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.99757281553398
+ }
+ ],
+ "return_max_T": [
+ 216
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5837944867537175
+ }
+ ],
+ "return_mean_T": [
+ 216
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.4034653465346536
+ }
+ ],
+ "return_min_T": [
+ 216
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.0627956390471756
+ }
+ ],
+ "return_std_T": [
+ 216
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 216
+ ],
+ "test_dead_allies_mean": [
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 216
+ ],
+ "test_dead_enemies_mean": [
+ 0.96875
+ ],
+ "test_dead_enemies_mean_T": [
+ 216
+ ],
+ "test_ep_length_mean": [
+ 54.5
+ ],
+ "test_ep_length_mean_T": [
+ 216
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.280241935483874
+ }
+ ],
+ "test_return_max_T": [
+ 216
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9681482554589795
+ }
+ ],
+ "test_return_mean_T": [
+ 216
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 216
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.06453424806387
+ }
+ ],
+ "test_return_std_T": [
+ 216
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/1/metrics.json b/results/sacred/10gen_protoss/feudal/1/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/1/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/1/run.json b/results/sacred/10gen_protoss/feudal/1/run.json
new file mode 100644
index 0000000..5ceccc6
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/1/run.json
@@ -0,0 +1,125 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 146, in train\n cos_sim = F.cosine_similarity(temp_state_diff, temp_goals, dim=-1)\n",
+ "RuntimeError: The size of tensor a (57408) must match the size of tensor b (49920) at non-singleton dimension 0\n"
+ ],
+ "heartbeat": "2025-01-05T19:45:42.461491",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T19:44:34.041398",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T19:45:42.465521"
+}
\ No newline at end of file
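
The fail_trace above (and the similar one in feudal/2) records a shape mismatch inside FeUdal_learner.train: F.cosine_similarity requires the non-reduced dimensions of its two inputs to match (or broadcast), and here temp_state_diff and temp_goals disagree at dimension 0 (57408 vs 49920 elements). A minimal sketch that reproduces the logged error; the sizes are taken from the trace, while the trailing feature dimension of 16 (the configured goal_dim) and the tensor names are only illustrative of how the tensors might be laid out:

import torch
import torch.nn.functional as F

# Shapes taken from the logged RuntimeError; 16 is the goal_dim from config.json (assumed layout).
temp_state_diff = torch.randn(57408, 16)
temp_goals = torch.randn(49920, 16)

# Raises: RuntimeError: The size of tensor a (57408) must match the size of
# tensor b (49920) at non-singleton dimension 0
cos_sim = F.cosine_similarity(temp_state_diff, temp_goals, dim=-1)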
diff --git a/results/sacred/10gen_protoss/feudal/2/config.json b/results/sacred/10gen_protoss/feudal/2/config.json
new file mode 100644
index 0000000..f11bb05
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/2/config.json
@@ -0,0 +1,131 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 414411342,
+ "state_dim": 80,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": false,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/2/cout.txt b/results/sacred/10gen_protoss/feudal/2/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/feudal/2/info.json b/results/sacred/10gen_protoss/feudal/2/info.json
new file mode 100644
index 0000000..3907e58
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/2/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 265
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 265
+ ],
+ "dead_enemies_mean": [
+ 1.25
+ ],
+ "dead_enemies_mean_T": [
+ 265
+ ],
+ "ep_length_mean": [
+ 66.25
+ ],
+ "ep_length_mean_T": [
+ 265
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 265
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.983471074380164
+ }
+ ],
+ "return_max_T": [
+ 265
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.397180549212854
+ }
+ ],
+ "return_mean_T": [
+ 265
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0509708737864076
+ }
+ ],
+ "return_min_T": [
+ 265
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1585009546446443
+ }
+ ],
+ "return_std_T": [
+ 265
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 265
+ ],
+ "test_dead_allies_mean": [
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 265
+ ],
+ "test_dead_enemies_mean": [
+ 0.125
+ ],
+ "test_dead_enemies_mean_T": [
+ 265
+ ],
+ "test_ep_length_mean": [
+ 56.84375
+ ],
+ "test_ep_length_mean_T": [
+ 265
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.96078431372549
+ }
+ ],
+ "test_return_max_T": [
+ 265
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1870757990039804
+ }
+ ],
+ "test_return_mean_T": [
+ 265
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 265
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5675980789607145
+ }
+ ],
+ "test_return_std_T": [
+ 265
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/2/metrics.json b/results/sacred/10gen_protoss/feudal/2/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/2/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/2/run.json b/results/sacred/10gen_protoss/feudal/2/run.json
new file mode 100644
index 0000000..24a01e3
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/2/run.json
@@ -0,0 +1,125 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 148, in train\n cos_sim = F.cosine_similarity(temp_state_diff, temp_goals, dim=-1)\n",
+ "RuntimeError: The size of tensor a (64768) must match the size of tensor b (56320) at non-singleton dimension 0\n"
+ ],
+ "heartbeat": "2025-01-05T19:50:34.270485",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T19:49:13.745305",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T19:50:34.274484"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/3/config.json b/results/sacred/10gen_protoss/feudal/3/config.json
new file mode 100644
index 0000000..eef8f9b
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/3/config.json
@@ -0,0 +1,131 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 521291150,
+ "state_dim": 92,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": false,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/3/cout.txt b/results/sacred/10gen_protoss/feudal/3/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/feudal/3/info.json b/results/sacred/10gen_protoss/feudal/3/info.json
new file mode 100644
index 0000000..78f4183
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/3/info.json
@@ -0,0 +1,368 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 211,
+ 10252
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 211,
+ 10252
+ ],
+ "dead_enemies_mean": [
+ 0.5,
+ 0.3111111111111111
+ ],
+ "dead_enemies_mean_T": [
+ 211,
+ 10252
+ ],
+ "ep_length_mean": [
+ 52.75,
+ 55.78333333333333
+ ],
+ "ep_length_mean_T": [
+ 211,
+ 10252
+ ],
+ "episode": [
+ 180
+ ],
+ "episode_T": [
+ 10030
+ ],
+ "episode_in_buffer": [
+ 180
+ ],
+ "episode_in_buffer_T": [
+ 10030
+ ],
+ "epsilon": [
+ 1.0,
+ 0.904715
+ ],
+ "epsilon_T": [
+ 211,
+ 10252
+ ],
+ "grad_norm_manager": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIxNDAzMDE2ODY1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMTQwMzAxNjg2NTYwcQFhLgEAAAAAAAAAnTV0RA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_manager_T": [
+ 7135
+ ],
+ "grad_norm_worker": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIxNDAzMDE2NjI2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMTQwMzAxNjYyNjU2cQFhLgEAAAAAAAAAHtgcRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_worker_T": [
+ 7135
+ ],
+ "hit_prob": [
+ 0.4580238461494446
+ ],
+ "hit_prob_T": [
+ 7135
+ ],
+ "manager_advantage": [
+ 0.1406308114528656
+ ],
+ "manager_advantage_T": [
+ 7135
+ ],
+ "manager_cos_sim": [
+ -0.00032846396788954735
+ ],
+ "manager_cos_sim_T": [
+ 7135
+ ],
+ "manager_loss": [
+ 10.693564414978027
+ ],
+ "manager_loss_T": [
+ 7135
+ ],
+ "q_taken_mean": [
+ 0.06587212437587596
+ ],
+ "q_taken_mean_T": [
+ 7135
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.432926829268293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.95343137254902
+ }
+ ],
+ "return_max_T": [
+ 211,
+ 10252
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3768681252989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.579137772995377
+ }
+ ],
+ "return_mean_T": [
+ 211,
+ 10252
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2794117647058827
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.4308943089430894
+ }
+ ],
+ "return_min_T": [
+ 211,
+ 10252
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.89094258138571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.421259942116347
+ }
+ ],
+ "return_std_T": [
+ 211,
+ 10252
+ ],
+ "target_mean": [
+ 0.10347526224159075
+ ],
+ "target_mean_T": [
+ 7135
+ ],
+ "td_error_abs": [
+ 0.22424918631197444
+ ],
+ "td_error_abs_T": [
+ 7135
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 211,
+ 10252
+ ],
+ "test_dead_allies_mean": [
+ 5.0,
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 211,
+ 10252
+ ],
+ "test_dead_enemies_mean": [
+ 0.34375,
+ 1.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 211,
+ 10252
+ ],
+ "test_ep_length_mean": [
+ 52.21875,
+ 57.5625
+ ],
+ "test_ep_length_mean_T": [
+ 211,
+ 10252
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.959349593495936
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.607843137254893
+ }
+ ],
+ "test_return_max_T": [
+ 211,
+ 10252
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.185841104734577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.841020668938307
+ }
+ ],
+ "test_return_mean_T": [
+ 211,
+ 10252
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 211,
+ 10252
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2407881654472965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.010960846314079
+ }
+ ],
+ "test_return_std_T": [
+ 211,
+ 10252
+ ],
+ "worker_loss": [
+ 714.7169189453125
+ ],
+ "worker_loss_T": [
+ 7135
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/3/metrics.json b/results/sacred/10gen_protoss/feudal/3/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/3/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/3/run.json b/results/sacred/10gen_protoss/feudal/3/run.json
new file mode 100644
index 0000000..302feec
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/3/run.json
@@ -0,0 +1,116 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2025-01-05T19:53:52.751678",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T19:51:28.620969",
+ "status": "INTERRUPTED",
+ "stop_time": "2025-01-05T19:53:52.757749"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/4/config.json b/results/sacred/10gen_protoss/feudal/4/config.json
new file mode 100644
index 0000000..5081a6e
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/4/config.json
@@ -0,0 +1,131 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 346146537,
+ "state_dim": 92,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": false,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/4/cout.txt b/results/sacred/10gen_protoss/feudal/4/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/feudal/4/info.json b/results/sacred/10gen_protoss/feudal/4/info.json
new file mode 100644
index 0000000..51aca4c
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/4/info.json
@@ -0,0 +1,3312 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.005555555555555556,
+ 0.011111111111111112,
+ 0.011111111111111112,
+ 0.029069767441860465,
+ 0.029069767441860465,
+ 0.06547619047619048,
+ 0.07386363636363637,
+ 0.1130952380952381,
+ 0.11046511627906977,
+ 0.08928571428571429,
+ 0.10714285714285714,
+ 0.06395348837209303,
+ 0.08928571428571429,
+ 0.10119047619047619
+ ],
+ "battle_won_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 4.988888888888889,
+ 4.972222222222222,
+ 4.972222222222222,
+ 4.930232558139535,
+ 4.9476744186046515,
+ 4.839285714285714,
+ 4.8522727272727275,
+ 4.755952380952381,
+ 4.6976744186046515,
+ 4.815476190476191,
+ 4.755952380952381,
+ 4.837209302325581,
+ 4.785714285714286,
+ 4.767857142857143
+ ],
+ "dead_allies_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "dead_enemies_mean": [
+ 1.25,
+ 0.4722222222222222,
+ 0.4574468085106383,
+ 0.7445652173913043,
+ 0.9333333333333333,
+ 1.1166666666666667,
+ 1.1277777777777778,
+ 1.4186046511627908,
+ 1.622093023255814,
+ 1.7321428571428572,
+ 1.8806818181818181,
+ 2.0,
+ 1.9593023255813953,
+ 1.744047619047619,
+ 1.8035714285714286,
+ 1.8430232558139534,
+ 2.0773809523809526,
+ 2.011904761904762
+ ],
+ "dead_enemies_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "ep_length_mean": [
+ 58.25,
+ 55.62777777777778,
+ 54.04255319148936,
+ 54.875,
+ 56.12777777777778,
+ 55.76111111111111,
+ 55.56111111111111,
+ 58.23837209302326,
+ 59.127906976744185,
+ 60.345238095238095,
+ 58.47159090909091,
+ 60.63095238095238,
+ 58.26162790697674,
+ 60.20238095238095,
+ 60.38690476190476,
+ 59.30232558139535,
+ 59.57738095238095,
+ 60.04761904761905
+ ],
+ "ep_length_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "episode": [
+ 180,
+ 368,
+ 552,
+ 732,
+ 912,
+ 1092,
+ 1268,
+ 1440,
+ 1608,
+ 1784,
+ 1952,
+ 2124,
+ 2292,
+ 2460,
+ 2632,
+ 2800,
+ 2968
+ ],
+ "episode_T": [
+ 10029,
+ 20170,
+ 30276,
+ 40361,
+ 50442,
+ 60455,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "episode_in_buffer": [
+ 180,
+ 368,
+ 552,
+ 732,
+ 912,
+ 1092,
+ 1268,
+ 1440,
+ 1608,
+ 1784,
+ 1952,
+ 2124,
+ 2292,
+ 2460,
+ 2632,
+ 2800,
+ 2968
+ ],
+ "episode_in_buffer_T": [
+ 10029,
+ 20170,
+ 30276,
+ 40361,
+ 50442,
+ 60455,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "epsilon": [
+ 1.0,
+ 0.9047245,
+ 0.808385,
+ 0.712378,
+ 0.6165705,
+ 0.5208010000000001,
+ 0.42567750000000004,
+ 0.33091500000000007,
+ 0.23459450000000004,
+ 0.13844500000000015,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05
+ ],
+ "epsilon_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "grad_norm_manager": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3ODQ0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1Nzg0NDAwcQFhLgEAAAAAAAAAkK5vRA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzE5ODYwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcxOTg2MDAwcQFhLgEAAAAAAAAADe9SRA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzE5Mzg5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcxOTM4OTYwcQFhLgEAAAAAAAAAMCvORA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxMzU3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTM1NzQ0cQFhLgEAAAAAAAAALsMaRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzIwMzQ3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcyMDM0NzY4cQFhLgEAAAAAAAAA8oZDRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzIwMjc0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcyMDI3NDcycQFhLgEAAAAAAAAADzhYRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzI4MDQyNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzMyODA0MjcycQFhLgEAAAAAAAAA0T5wRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxNjI4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTYyODE2cQFhLgEAAAAAAAAA/CGKRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxNTMxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTUzMTIwcQFhLgEAAAAAAAAAwiGRRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzNDM0ODUzOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzQzNDg1MzkycQFhLgEAAAAAAAAAT42dRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzI3NjU2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzMyNzY1NjgwcQFhLgEAAAAAAAAAEM+nRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzNDM1MjU2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzQzNTI1NjE2cQFhLgEAAAAAAAAAsNKqRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3NzYzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1Nzc2MzM2cQFhLgEAAAAAAAAArpPORQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxMTM2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTEzNjY0cQFhLgEAAAAAAAAA+VfFRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxNTI0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTUyNDQ4cQFhLgEAAAAAAAAAI6DhRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3Nzk5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1Nzc5OTg0cQFhLgEAAAAAAAAACU33RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU2ODA4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1NjgwODE2cQFhLgEAAAAAAAAA1QX8RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_manager_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "grad_norm_worker": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3NzI0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1NzcyNDAwcQFhLgEAAAAAAAAASoQyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzE5ODIyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcxOTgyMjU2cQFhLgEAAAAAAAAAEb6HRA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzE5NDEzNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcxOTQxMzYwcQFhLgEAAAAAAAAANWnfRA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxMzc2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTM3NjY0cQFhLgEAAAAAAAAAZjPPRA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzIwMjY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcyMDI2NTEycQFhLgEAAAAAAAAAR1TVRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxNzIwMTg0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTcyMDE4NDQ4cQFhLgEAAAAAAAAA5df8RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzI3OTYyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzMyNzk2MjA4cQFhLgEAAAAAAAAALoU6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxNTQwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTU0MDgwcQFhLgEAAAAAAAAA46chRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMwODM2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMDgzNjE2cQFhLgEAAAAAAAAA5KVORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzNDM0ODk5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzQzNDg5OTA0cQFhLgEAAAAAAAAAv6F5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzI3NjgwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzMyNzY4MDgwcQFhLgEAAAAAAAAAzM6vRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzNDM1MjgwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzQzNTI4MDE2cQFhLgEAAAAAAAAAFCJiRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3NjI5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1NzYyOTkycQFhLgEAAAAAAAAAdbKhRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxMDYyNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTA2MjcycQFhLgEAAAAAAAAACH/MRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkxMjMxNDE0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MTIzMTQxNDA4cQFhLgEAAAAAAAAA6WVaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU3Nzg4MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1Nzc4ODMycQFhLgEAAAAAAAAAMW+JRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADIwMzkzMzU4MDgzMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyMDM5MzM1ODA4MzA0cQFhLgEAAAAAAAAAFqspRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_worker_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "hit_prob": [
+ 0.4625283479690552,
+ 0.48018133640289307,
+ 0.47543859481811523,
+ 0.49337711930274963,
+ 0.5260359644889832,
+ 0.5174561738967896,
+ 0.546875,
+ 0.5532989501953125,
+ 0.5754930973052979,
+ 0.5918435454368591,
+ 0.6163361072540283,
+ 0.6190822720527649,
+ 0.6588496565818787,
+ 0.6196382641792297,
+ 0.6466586589813232,
+ 0.6725119948387146,
+ 0.6463037729263306
+ ],
+ "hit_prob_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "manager_advantage": [
+ 0.17827916145324707,
+ -0.126869797706604,
+ -0.18326307833194733,
+ -0.2490502893924713,
+ -0.28038641810417175,
+ -0.3415907621383667,
+ -0.3459433317184448,
+ -0.37848103046417236,
+ -0.39563965797424316,
+ -0.430917352437973,
+ -0.46173295378685,
+ -0.48340317606925964,
+ -0.5467185974121094,
+ -0.5487329363822937,
+ -0.5862215161323547,
+ -0.6031898260116577,
+ -0.6954140663146973
+ ],
+ "manager_advantage_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "manager_cos_sim": [
+ -0.007258685305714607,
+ -0.06707026064395905,
+ -0.07516039162874222,
+ -0.0904148668050766,
+ -0.08719677478075027,
+ -0.10499168932437897,
+ -0.08633621782064438,
+ -0.08658603578805923,
+ -0.0802503228187561,
+ -0.08891929686069489,
+ -0.08517266064882278,
+ -0.08392487466335297,
+ -0.10865126550197601,
+ -0.0778808742761612,
+ -0.08868388086557388,
+ -0.07813122123479843,
+ -0.11740733683109283
+ ],
+ "manager_cos_sim_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "manager_loss": [
+ 115.5124740600586,
+ -1028.06201171875,
+ -1652.3564453125,
+ -2160.2451171875,
+ -2564.904541015625,
+ -3089.45947265625,
+ -3469.741943359375,
+ -3798.271484375,
+ -4202.58154296875,
+ -4482.2431640625,
+ -4802.984375,
+ -5141.43408203125,
+ -5724.68603515625,
+ -6142.302734375,
+ -6331.82177734375,
+ -6995.61181640625,
+ -7255.0810546875
+ ],
+ "manager_loss_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "q_taken_mean": [
+ -0.044565047698767006,
+ 0.014224379345350351,
+ 0.08169372247718591,
+ 0.14147329632636313,
+ 0.18351223858895488,
+ 0.22486390654425611,
+ 0.27711643603963587,
+ 0.27989202098444693,
+ 0.3774073515526647,
+ 0.41855172114505695,
+ 0.44644461303374655,
+ 0.4860768527909905,
+ 0.5170812206534499,
+ 0.550999698252414,
+ 0.47763490812991866,
+ 0.4575082012539915,
+ 0.5147622315710706
+ ],
+ "q_taken_mean_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.645390070921986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.992907801418438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.42622950819672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.008264462809915
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.047619047619037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.238095238095223
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.114754098360656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.85714285714285
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.238095238095237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.56666666666666
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.876190476190473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.047619047619037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.04761904761903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.85714285714285
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 30.47619047619047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.428571428571416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.371428571428556
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.61157024793389
+ }
+ ],
+ "return_max_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.582775202068017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.024079220002897
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.204934957063274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.544645488239546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.991965746869353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.436948839901483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.582637329797293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.426525728851901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.67469212216396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.220782749738055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.693006615008487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.634172605686919
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.584739332845631
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.413728764746278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.50221917136682
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.35787800347498
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.305142004157872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.92113475074763
+ }
+ ],
+ "return_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6885245901639343
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.0780141843971631
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6857142857142857
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3904761904761906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.703900709219858
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.6859504132231404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.4738095238095243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.0476190476190477
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.021276595744681
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5225409836065575
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7190082644628104
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.273809523809524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.780141843971631
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.4754098360655739
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0330578512396698
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.378099173553719
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1735537190082646
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.226950354609929
+ }
+ ],
+ "return_min_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.1276446276833054
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.8778987374400224
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.5895492430588525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0480065640229466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.589121361145563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5463848274438066
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5172217073133885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.122017284064612
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.011472807848675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5231883736930785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.513280804222921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2024220444193965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.658728610735705
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.865576127525635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.16933906528407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.325791967086691
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.506524440130765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.558568968384401
+ }
+ ],
+ "return_std_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "target_mean": [
+ 0.00593172103639633,
+ 0.0206422264573594,
+ 0.08899192443053197,
+ 0.14061497789840852,
+ 0.19990191561483525,
+ 0.24190920161523974,
+ 0.29949911974985666,
+ 0.29972243775282464,
+ 0.3566758134879004,
+ 0.3969239173903108,
+ 0.4387415795024105,
+ 0.46657565927532174,
+ 0.4929229469642624,
+ 0.517249018257854,
+ 0.49672452314259036,
+ 0.4830155044238957,
+ 0.49739005072741194
+ ],
+ "target_mean_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "td_error_abs": [
+ 0.2643742118022339,
+ 0.12115991248981796,
+ 0.14378329300246268,
+ 0.1602097232784845,
+ 0.18114704385098465,
+ 0.20070449583740765,
+ 0.21440495482278527,
+ 0.20618957953733086,
+ 0.22427828766479577,
+ 0.23617609930916889,
+ 0.2137105137848657,
+ 0.2369837662195719,
+ 0.2303651804965681,
+ 0.2572352682897287,
+ 0.20813644468203948,
+ 0.2098192475843201,
+ 0.1965318435781533
+ ],
+ "td_error_abs_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.21875,
+ 0.125,
+ 0.0625,
+ 0.125,
+ 0.25,
+ 0.21875,
+ 0.15625,
+ 0.03125,
+ 0.03125,
+ 0.0625,
+ 0.125,
+ 0.125,
+ 0.03125,
+ 0.09375,
+ 0.09375,
+ 0.15625
+ ],
+ "test_battle_won_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_dead_allies_mean": [
+ 5.0,
+ 5.0,
+ 4.625,
+ 4.65625,
+ 4.84375,
+ 4.71875,
+ 4.5625,
+ 4.40625,
+ 4.6875,
+ 4.9375,
+ 4.875,
+ 4.90625,
+ 4.59375,
+ 4.78125,
+ 4.875,
+ 4.78125,
+ 4.75,
+ 4.59375
+ ],
+ "test_dead_allies_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_dead_enemies_mean": [
+ 0.71875,
+ 0.8125,
+ 2.59375,
+ 2.625,
+ 2.25,
+ 2.53125,
+ 2.71875,
+ 2.59375,
+ 2.34375,
+ 1.9375,
+ 2.0625,
+ 2.3125,
+ 1.875,
+ 1.9375,
+ 1.75,
+ 2.0,
+ 1.875,
+ 2.53125
+ ],
+ "test_dead_enemies_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_ep_length_mean": [
+ 49.28125,
+ 54.375,
+ 61.96875,
+ 67.3125,
+ 67.5625,
+ 59.40625,
+ 64.5625,
+ 59.5,
+ 66.125,
+ 62.3125,
+ 63.65625,
+ 60.90625,
+ 60.09375,
+ 60.125,
+ 59.1875,
+ 60.875,
+ 61.3125,
+ 62.28125
+ ],
+ "test_ep_length_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.278688524590164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.228571428571428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.471074380165305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.11428571428571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.990476190476187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.295081967213125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.428571428571423
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.16528925619835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.18095238095237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.309917355371926
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.67213114754098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.026190476190468
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.165289256198353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.435714285714273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.23809523809523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.238095238095227
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.1900826446281
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.904761904761898
+ }
+ ],
+ "test_return_max_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.172349572693216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.831529751755548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.151486927821125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.177385180540874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.98663651337802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.284425728374678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.09495973610397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.574359586857334
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.074456782309538
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.555806530829521
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.615024465632308
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.615152093605229
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.371344437512724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.297321277791688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.504331681044748
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.61818973776439
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.290793377538435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.457208167398406
+ }
+ ],
+ "test_return_mean_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2695035460992905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.870901639344262
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9619047619047616
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9586776859504145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8217213114754096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.231404958677686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.962765957446808
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.0514184397163122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.180952380952381
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.042553191489361
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3884297520661155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.264462809917356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5195035460992907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5778688524590163
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.307377049180328
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.9098360655737725
+ }
+ ],
+ "test_return_min_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3794900307297824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.87556644503774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.301227688760056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.476082047163996
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8597748443876614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.169291950369022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.294229298853561
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.816742061364013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.195101345780509
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.087458445171775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7285393939309825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.729509928250856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.160157268800494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.615670721691422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.195260738543831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.147454164441768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9689944839050812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.24693847247732
+ }
+ ],
+ "test_return_std_T": [
+ 233,
+ 10246,
+ 20406,
+ 30503,
+ 40606,
+ 50643,
+ 60644,
+ 70661,
+ 80831,
+ 90969,
+ 101260,
+ 111446,
+ 121467,
+ 131581,
+ 141726,
+ 151926,
+ 161935,
+ 172023
+ ],
+ "worker_loss": [
+ 862.7616577148438,
+ 224.97927856445312,
+ 305.8400573730469,
+ 356.6451416015625,
+ 490.9036865234375,
+ 596.3804931640625,
+ 723.9983520507812,
+ 641.6268310546875,
+ 735.6061401367188,
+ 773.0672607421875,
+ 685.4548950195312,
+ 814.1674194335938,
+ 802.2513427734375,
+ 891.1700439453125,
+ 701.4380493164062,
+ 739.7481079101562,
+ 535.0346069335938
+ ],
+ "worker_loss_T": [
+ 7056,
+ 17188,
+ 27350,
+ 37443,
+ 47513,
+ 57594,
+ 67604,
+ 77823,
+ 88080,
+ 98095,
+ 108177,
+ 118255,
+ 128398,
+ 138576,
+ 148696,
+ 158904,
+ 168987
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/4/metrics.json b/results/sacred/10gen_protoss/feudal/4/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/4/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/4/run.json b/results/sacred/10gen_protoss/feudal/4/run.json
new file mode 100644
index 0000000..4565d7b
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/4/run.json
@@ -0,0 +1,116 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2025-01-05T20:17:27.881164",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T19:55:31.376680",
+ "status": "INTERRUPTED",
+ "stop_time": "2025-01-05T20:17:27.914784"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/5/config.json b/results/sacred/10gen_protoss/feudal/5/config.json
new file mode 100644
index 0000000..913ded5
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/5/config.json
@@ -0,0 +1,131 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 549441411,
+ "state_dim": 92,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": false,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/5/cout.txt b/results/sacred/10gen_protoss/feudal/5/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/feudal/5/info.json b/results/sacred/10gen_protoss/feudal/5/info.json
new file mode 100644
index 0000000..6596464
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/5/info.json
@@ -0,0 +1,73784 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.017045454545454544,
+ 0.005681818181818182,
+ 0.005813953488372093,
+ 0.024390243902439025,
+ 0.03048780487804878,
+ 0.04878048780487805,
+ 0.054878048780487805,
+ 0.054878048780487805,
+ 0.041666666666666664,
+ 0.06395348837209303,
+ 0.0375,
+ 0.09146341463414634,
+ 0.09375,
+ 0.06547619047619048,
+ 0.13414634146341464,
+ 0.09375,
+ 0.07926829268292683,
+ 0.09146341463414634,
+ 0.10714285714285714,
+ 0.06547619047619048,
+ 0.09523809523809523,
+ 0.0975609756097561,
+ 0.07142857142857142,
+ 0.10119047619047619,
+ 0.10365853658536585,
+ 0.10975609756097561,
+ 0.08928571428571429,
+ 0.13095238095238096,
+ 0.13095238095238096,
+ 0.1402439024390244,
+ 0.07738095238095238,
+ 0.12804878048780488,
+ 0.08928571428571429,
+ 0.10365853658536585,
+ 0.11585365853658537,
+ 0.1511627906976744,
+ 0.1744186046511628,
+ 0.07738095238095238,
+ 0.1625,
+ 0.12790697674418605,
+ 0.17857142857142858,
+ 0.13953488372093023,
+ 0.11046511627906977,
+ 0.13095238095238096,
+ 0.13372093023255813,
+ 0.12209302325581395,
+ 0.14772727272727273,
+ 0.12790697674418605,
+ 0.14534883720930233,
+ 0.19767441860465115,
+ 0.17261904761904762,
+ 0.16666666666666666,
+ 0.19767441860465115,
+ 0.19186046511627908,
+ 0.18181818181818182,
+ 0.16279069767441862,
+ 0.1511627906976744,
+ 0.20348837209302326,
+ 0.14772727272727273,
+ 0.13095238095238096,
+ 0.10714285714285714,
+ 0.13690476190476192,
+ 0.13095238095238096,
+ 0.12209302325581395,
+ 0.11666666666666667,
+ 0.12209302325581395,
+ 0.1511627906976744,
+ 0.14772727272727273,
+ 0.12790697674418605,
+ 0.11627906976744186,
+ 0.1569767441860465,
+ 0.13414634146341464,
+ 0.13690476190476192,
+ 0.11627906976744186,
+ 0.11363636363636363,
+ 0.15476190476190477,
+ 0.1569767441860465,
+ 0.1524390243902439,
+ 0.08139534883720931,
+ 0.11627906976744186,
+ 0.19318181818181818,
+ 0.125,
+ 0.21428571428571427,
+ 0.10465116279069768,
+ 0.19767441860465115,
+ 0.13636363636363635,
+ 0.13095238095238096,
+ 0.16463414634146342,
+ 0.13095238095238096,
+ 0.12790697674418605,
+ 0.13372093023255813,
+ 0.1569767441860465,
+ 0.1744186046511628,
+ 0.14204545454545456,
+ 0.1511627906976744,
+ 0.18023255813953487,
+ 0.1488095238095238,
+ 0.11363636363636363,
+ 0.13372093023255813,
+ 0.17857142857142858,
+ 0.13068181818181818,
+ 0.25,
+ 0.14204545454545456,
+ 0.14204545454545456,
+ 0.14204545454545456,
+ 0.18023255813953487,
+ 0.12777777777777777,
+ 0.17613636363636365,
+ 0.17222222222222222,
+ 0.11046511627906977,
+ 0.19186046511627908,
+ 0.1534090909090909,
+ 0.19318181818181818,
+ 0.16477272727272727,
+ 0.1590909090909091,
+ 0.13372093023255813,
+ 0.10795454545454546,
+ 0.15476190476190477,
+ 0.11046511627906977,
+ 0.1511627906976744,
+ 0.15476190476190477,
+ 0.15853658536585366,
+ 0.14534883720930233,
+ 0.14285714285714285,
+ 0.1511627906976744,
+ 0.12209302325581395,
+ 0.1686046511627907,
+ 0.14285714285714285,
+ 0.21511627906976744,
+ 0.12790697674418605,
+ 0.14204545454545456,
+ 0.17222222222222222,
+ 0.12790697674418605,
+ 0.18023255813953487,
+ 0.16071428571428573,
+ 0.23295454545454544,
+ 0.17857142857142858,
+ 0.14285714285714285,
+ 0.18604651162790697,
+ 0.19186046511627908,
+ 0.15476190476190477,
+ 0.16666666666666666,
+ 0.19642857142857142,
+ 0.13068181818181818,
+ 0.17857142857142858,
+ 0.1686046511627907,
+ 0.1488095238095238,
+ 0.20238095238095238,
+ 0.15555555555555556,
+ 0.125,
+ 0.23809523809523808,
+ 0.1569767441860465,
+ 0.18452380952380953,
+ 0.2261904761904762,
+ 0.21511627906976744,
+ 0.19767441860465115,
+ 0.18604651162790697,
+ 0.19767441860465115,
+ 0.19642857142857142,
+ 0.23214285714285715,
+ 0.17073170731707318,
+ 0.15476190476190477,
+ 0.20348837209302326,
+ 0.18452380952380953,
+ 0.25595238095238093,
+ 0.19186046511627908,
+ 0.20454545454545456,
+ 0.16279069767441862,
+ 0.18452380952380953,
+ 0.2261904761904762,
+ 0.1686046511627907,
+ 0.19767441860465115,
+ 0.21511627906976744,
+ 0.15853658536585366,
+ 0.2261904761904762,
+ 0.19642857142857142,
+ 0.22093023255813954,
+ 0.1511627906976744,
+ 0.17857142857142858,
+ 0.23214285714285715,
+ 0.1524390243902439,
+ 0.21428571428571427,
+ 0.16071428571428573,
+ 0.12790697674418605,
+ 0.21341463414634146,
+ 0.1686046511627907,
+ 0.18452380952380953,
+ 0.12790697674418605,
+ 0.18023255813953487,
+ 0.13372093023255813,
+ 0.17857142857142858,
+ 0.13095238095238096,
+ 0.20833333333333334,
+ 0.16666666666666666,
+ 0.16666666666666666,
+ 0.16666666666666666,
+ 0.17045454545454544,
+ 0.23780487804878048,
+ 0.18604651162790697,
+ 0.17261904761904762,
+ 0.21511627906976744,
+ 0.13690476190476192,
+ 0.19186046511627908,
+ 0.1569767441860465,
+ 0.19767441860465115,
+ 0.16666666666666666,
+ 0.19186046511627908,
+ 0.18452380952380953,
+ 0.21341463414634146,
+ 0.13372093023255813,
+ 0.17857142857142858,
+ 0.22023809523809523,
+ 0.2261904761904762,
+ 0.17857142857142858,
+ 0.10975609756097561,
+ 0.21428571428571427,
+ 0.21341463414634146,
+ 0.17261904761904762,
+ 0.17261904761904762,
+ 0.21951219512195122,
+ 0.22023809523809523,
+ 0.17857142857142858,
+ 0.18452380952380953,
+ 0.23780487804878048,
+ 0.17261904761904762,
+ 0.11904761904761904,
+ 0.18452380952380953,
+ 0.1488095238095238,
+ 0.20833333333333334,
+ 0.16666666666666666,
+ 0.1569767441860465,
+ 0.12790697674418605,
+ 0.14204545454545456,
+ 0.16477272727272727,
+ 0.19047619047619047,
+ 0.18452380952380953,
+ 0.20238095238095238,
+ 0.19642857142857142,
+ 0.15476190476190477,
+ 0.16666666666666666,
+ 0.19186046511627908,
+ 0.19047619047619047,
+ 0.20238095238095238,
+ 0.15853658536585366,
+ 0.1488095238095238,
+ 0.21511627906976744,
+ 0.20121951219512196,
+ 0.20238095238095238,
+ 0.25595238095238093,
+ 0.1524390243902439,
+ 0.1625,
+ 0.15853658536585366,
+ 0.20238095238095238,
+ 0.16463414634146342,
+ 0.19186046511627908,
+ 0.23809523809523808,
+ 0.22023809523809523,
+ 0.19047619047619047,
+ 0.16071428571428573,
+ 0.20121951219512196,
+ 0.2375,
+ 0.16071428571428573,
+ 0.17073170731707318,
+ 0.16071428571428573,
+ 0.22560975609756098,
+ 0.14375,
+ 0.18292682926829268,
+ 0.19047619047619047,
+ 0.23170731707317074,
+ 0.18292682926829268,
+ 0.13095238095238096,
+ 0.18292682926829268,
+ 0.18452380952380953,
+ 0.23809523809523808,
+ 0.22560975609756098,
+ 0.20121951219512196,
+ 0.25595238095238093,
+ 0.1488095238095238,
+ 0.19047619047619047,
+ 0.19375,
+ 0.2073170731707317,
+ 0.23780487804878048,
+ 0.1858974358974359,
+ 0.23170731707317074,
+ 0.16071428571428573,
+ 0.21341463414634146,
+ 0.20121951219512196,
+ 0.22560975609756098,
+ 0.2621951219512195,
+ 0.22023809523809523,
+ 0.2073170731707317,
+ 0.21341463414634146,
+ 0.21428571428571427,
+ 0.20238095238095238,
+ 0.1951219512195122,
+ 0.19047619047619047,
+ 0.18125,
+ 0.14634146341463414,
+ 0.20833333333333334,
+ 0.20348837209302326,
+ 0.13414634146341464,
+ 0.21428571428571427,
+ 0.21341463414634146,
+ 0.25595238095238093,
+ 0.1744186046511628,
+ 0.23214285714285715,
+ 0.20238095238095238,
+ 0.18452380952380953,
+ 0.19047619047619047,
+ 0.19767441860465115,
+ 0.22674418604651161,
+ 0.19186046511627908,
+ 0.23125,
+ 0.19642857142857142,
+ 0.23170731707317074,
+ 0.16666666666666666,
+ 0.14285714285714285,
+ 0.2125,
+ 0.19047619047619047,
+ 0.23809523809523808,
+ 0.16463414634146342,
+ 0.25609756097560976,
+ 0.14534883720930233,
+ 0.19186046511627908,
+ 0.25,
+ 0.23170731707317074,
+ 0.26744186046511625,
+ 0.20348837209302326,
+ 0.22093023255813954,
+ 0.17857142857142858,
+ 0.23780487804878048,
+ 0.23214285714285715,
+ 0.22560975609756098,
+ 0.1744186046511628,
+ 0.20121951219512196,
+ 0.22023809523809523,
+ 0.18023255813953487,
+ 0.19047619047619047,
+ 0.23809523809523808,
+ 0.19047619047619047,
+ 0.19886363636363635,
+ 0.16279069767441862,
+ 0.23780487804878048,
+ 0.22093023255813954,
+ 0.14534883720930233,
+ 0.17073170731707318,
+ 0.18902439024390244,
+ 0.2804878048780488,
+ 0.1744186046511628,
+ 0.25595238095238093,
+ 0.1686046511627907,
+ 0.2261904761904762,
+ 0.16071428571428573,
+ 0.27325581395348836,
+ 0.19642857142857142,
+ 0.18023255813953487,
+ 0.22023809523809523,
+ 0.19642857142857142,
+ 0.13095238095238096,
+ 0.2556818181818182,
+ 0.26744186046511625,
+ 0.2558139534883721,
+ 0.20930232558139536,
+ 0.23214285714285715,
+ 0.2261904761904762,
+ 0.24404761904761904,
+ 0.2619047619047619,
+ 0.2804878048780488,
+ 0.20238095238095238,
+ 0.19047619047619047,
+ 0.19642857142857142,
+ 0.18452380952380953,
+ 0.20121951219512196,
+ 0.25,
+ 0.2073170731707317,
+ 0.27380952380952384,
+ 0.21951219512195122,
+ 0.21428571428571427,
+ 0.25609756097560976,
+ 0.22093023255813954,
+ 0.23809523809523808,
+ 0.20238095238095238,
+ 0.21428571428571427,
+ 0.20238095238095238,
+ 0.23214285714285715,
+ 0.18604651162790697,
+ 0.22674418604651161,
+ 0.18023255813953487,
+ 0.28488372093023256,
+ 0.21875,
+ 0.19767441860465115,
+ 0.23780487804878048,
+ 0.21511627906976744,
+ 0.19767441860465115,
+ 0.22674418604651161,
+ 0.2616279069767442
+ ],
+ "battle_won_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 4.9602272727272725,
+ 4.988636363636363,
+ 4.988372093023256,
+ 4.926829268292683,
+ 4.932926829268292,
+ 4.878048780487805,
+ 4.865853658536586,
+ 4.853658536585366,
+ 4.916666666666667,
+ 4.854651162790698,
+ 4.93125,
+ 4.75,
+ 4.775,
+ 4.845238095238095,
+ 4.634146341463414,
+ 4.80625,
+ 4.817073170731708,
+ 4.670731707317073,
+ 4.732142857142857,
+ 4.880952380952381,
+ 4.803571428571429,
+ 4.798780487804878,
+ 4.833333333333333,
+ 4.767857142857143,
+ 4.7317073170731705,
+ 4.7682926829268295,
+ 4.779761904761905,
+ 4.684523809523809,
+ 4.678571428571429,
+ 4.689024390243903,
+ 4.809523809523809,
+ 4.701219512195122,
+ 4.785714285714286,
+ 4.762195121951219,
+ 4.762195121951219,
+ 4.633720930232558,
+ 4.546511627906977,
+ 4.815476190476191,
+ 4.54375,
+ 4.651162790697675,
+ 4.601190476190476,
+ 4.703488372093023,
+ 4.732558139534884,
+ 4.654761904761905,
+ 4.6686046511627906,
+ 4.726744186046512,
+ 4.659090909090909,
+ 4.6976744186046515,
+ 4.680232558139535,
+ 4.598837209302325,
+ 4.589285714285714,
+ 4.595238095238095,
+ 4.534883720930233,
+ 4.534883720930233,
+ 4.551136363636363,
+ 4.604651162790698,
+ 4.598837209302325,
+ 4.5,
+ 4.607954545454546,
+ 4.696428571428571,
+ 4.684523809523809,
+ 4.708333333333333,
+ 4.696428571428571,
+ 4.726744186046512,
+ 4.7444444444444445,
+ 4.726744186046512,
+ 4.645348837209302,
+ 4.6477272727272725,
+ 4.6976744186046515,
+ 4.761627906976744,
+ 4.651162790697675,
+ 4.676829268292683,
+ 4.7023809523809526,
+ 4.715116279069767,
+ 4.7102272727272725,
+ 4.666666666666667,
+ 4.680232558139535,
+ 4.628048780487805,
+ 4.854651162790698,
+ 4.761627906976744,
+ 4.488636363636363,
+ 4.6875,
+ 4.476190476190476,
+ 4.77906976744186,
+ 4.5523255813953485,
+ 4.653409090909091,
+ 4.6726190476190474,
+ 4.640243902439025,
+ 4.660714285714286,
+ 4.709302325581396,
+ 4.709302325581396,
+ 4.691860465116279,
+ 4.598837209302325,
+ 4.6647727272727275,
+ 4.709302325581396,
+ 4.5523255813953485,
+ 4.684523809523809,
+ 4.715909090909091,
+ 4.744186046511628,
+ 4.565476190476191,
+ 4.7102272727272725,
+ 4.436046511627907,
+ 4.721590909090909,
+ 4.625,
+ 4.6022727272727275,
+ 4.604651162790698,
+ 4.716666666666667,
+ 4.596590909090909,
+ 4.588888888888889,
+ 4.773255813953488,
+ 4.534883720930233,
+ 4.630681818181818,
+ 4.642045454545454,
+ 4.642045454545454,
+ 4.630681818181818,
+ 4.715116279069767,
+ 4.744318181818182,
+ 4.654761904761905,
+ 4.75,
+ 4.6686046511627906,
+ 4.690476190476191,
+ 4.640243902439025,
+ 4.651162790697675,
+ 4.7023809523809526,
+ 4.686046511627907,
+ 4.662790697674419,
+ 4.645348837209302,
+ 4.714285714285714,
+ 4.465116279069767,
+ 4.738372093023256,
+ 4.698863636363637,
+ 4.6,
+ 4.732558139534884,
+ 4.616279069767442,
+ 4.648809523809524,
+ 4.448863636363637,
+ 4.642857142857143,
+ 4.678571428571429,
+ 4.540697674418604,
+ 4.645348837209302,
+ 4.648809523809524,
+ 4.660714285714286,
+ 4.5773809523809526,
+ 4.6875,
+ 4.571428571428571,
+ 4.604651162790698,
+ 4.583333333333333,
+ 4.470238095238095,
+ 4.611111111111111,
+ 4.7023809523809526,
+ 4.428571428571429,
+ 4.656976744186046,
+ 4.613095238095238,
+ 4.476190476190476,
+ 4.511627906976744,
+ 4.5,
+ 4.5523255813953485,
+ 4.494186046511628,
+ 4.565476190476191,
+ 4.440476190476191,
+ 4.634146341463414,
+ 4.684523809523809,
+ 4.523255813953488,
+ 4.619047619047619,
+ 4.440476190476191,
+ 4.5813953488372094,
+ 4.494318181818182,
+ 4.598837209302325,
+ 4.511904761904762,
+ 4.464285714285714,
+ 4.5813953488372094,
+ 4.52906976744186,
+ 4.488372093023256,
+ 4.615853658536586,
+ 4.517857142857143,
+ 4.553571428571429,
+ 4.482558139534884,
+ 4.645348837209302,
+ 4.541666666666667,
+ 4.523809523809524,
+ 4.634146341463414,
+ 4.523809523809524,
+ 4.625,
+ 4.72093023255814,
+ 4.530487804878049,
+ 4.651162790697675,
+ 4.589285714285714,
+ 4.709302325581396,
+ 4.523255813953488,
+ 4.680232558139535,
+ 4.511904761904762,
+ 4.678571428571429,
+ 4.517857142857143,
+ 4.625,
+ 4.630952380952381,
+ 4.571428571428571,
+ 4.573863636363637,
+ 4.445121951219512,
+ 4.5523255813953485,
+ 4.559523809523809,
+ 4.476744186046512,
+ 4.696428571428571,
+ 4.575581395348837,
+ 4.633720930232558,
+ 4.534883720930233,
+ 4.595238095238095,
+ 4.52906976744186,
+ 4.613095238095238,
+ 4.487804878048781,
+ 4.715116279069767,
+ 4.523809523809524,
+ 4.440476190476191,
+ 4.410714285714286,
+ 4.523809523809524,
+ 4.780487804878049,
+ 4.482142857142857,
+ 4.512195121951219,
+ 4.601190476190476,
+ 4.535714285714286,
+ 4.439024390243903,
+ 4.523809523809524,
+ 4.571428571428571,
+ 4.571428571428571,
+ 4.445121951219512,
+ 4.607142857142857,
+ 4.720238095238095,
+ 4.559523809523809,
+ 4.678571428571429,
+ 4.517857142857143,
+ 4.601190476190476,
+ 4.616279069767442,
+ 4.627906976744186,
+ 4.619318181818182,
+ 4.5397727272727275,
+ 4.541666666666667,
+ 4.565476190476191,
+ 4.470238095238095,
+ 4.553571428571429,
+ 4.607142857142857,
+ 4.523809523809524,
+ 4.563953488372093,
+ 4.583333333333333,
+ 4.553571428571429,
+ 4.615853658536586,
+ 4.654761904761905,
+ 4.424418604651163,
+ 4.536585365853658,
+ 4.4523809523809526,
+ 4.351190476190476,
+ 4.689024390243903,
+ 4.61875,
+ 4.530487804878049,
+ 4.505952380952381,
+ 4.615853658536586,
+ 4.5,
+ 4.440476190476191,
+ 4.398809523809524,
+ 4.553571428571429,
+ 4.601190476190476,
+ 4.512195121951219,
+ 4.43125,
+ 4.607142857142857,
+ 4.597560975609756,
+ 4.601190476190476,
+ 4.396341463414634,
+ 4.65,
+ 4.573170731707317,
+ 4.541666666666667,
+ 4.4573170731707314,
+ 4.621951219512195,
+ 4.7023809523809526,
+ 4.585365853658536,
+ 4.529761904761905,
+ 4.386904761904762,
+ 4.469512195121951,
+ 4.451219512195122,
+ 4.345238095238095,
+ 4.642857142857143,
+ 4.517857142857143,
+ 4.475,
+ 4.4817073170731705,
+ 4.408536585365853,
+ 4.551282051282051,
+ 4.445121951219512,
+ 4.5476190476190474,
+ 4.487804878048781,
+ 4.5182926829268295,
+ 4.4573170731707314,
+ 4.359756097560975,
+ 4.5,
+ 4.5182926829268295,
+ 4.463414634146342,
+ 4.470238095238095,
+ 4.559523809523809,
+ 4.5,
+ 4.541666666666667,
+ 4.53125,
+ 4.560975609756097,
+ 4.482142857142857,
+ 4.511627906976744,
+ 4.670731707317073,
+ 4.511904761904762,
+ 4.5182926829268295,
+ 4.375,
+ 4.558139534883721,
+ 4.351190476190476,
+ 4.446428571428571,
+ 4.583333333333333,
+ 4.505952380952381,
+ 4.494186046511628,
+ 4.441860465116279,
+ 4.540697674418604,
+ 4.5125,
+ 4.571428571428571,
+ 4.426829268292683,
+ 4.601190476190476,
+ 4.678571428571429,
+ 4.51875,
+ 4.511904761904762,
+ 4.458333333333333,
+ 4.597560975609756,
+ 4.384146341463414,
+ 4.6104651162790695,
+ 4.546511627906977,
+ 4.396341463414634,
+ 4.451219512195122,
+ 4.343023255813954,
+ 4.52906976744186,
+ 4.465116279069767,
+ 4.630952380952381,
+ 4.4573170731707314,
+ 4.416666666666667,
+ 4.469512195121951,
+ 4.569767441860465,
+ 4.548780487804878,
+ 4.488095238095238,
+ 4.5,
+ 4.505952380952381,
+ 4.4226190476190474,
+ 4.529761904761905,
+ 4.494318181818182,
+ 4.5523255813953485,
+ 4.445121951219512,
+ 4.430232558139535,
+ 4.680232558139535,
+ 4.609756097560975,
+ 4.5,
+ 4.378048780487805,
+ 4.563953488372093,
+ 4.428571428571429,
+ 4.604651162790698,
+ 4.464285714285714,
+ 4.619047619047619,
+ 4.284883720930233,
+ 4.529761904761905,
+ 4.558139534883721,
+ 4.565476190476191,
+ 4.601190476190476,
+ 4.678571428571429,
+ 4.369318181818182,
+ 4.290697674418604,
+ 4.348837209302325,
+ 4.436046511627907,
+ 4.476190476190476,
+ 4.511904761904762,
+ 4.4226190476190474,
+ 4.345238095238095,
+ 4.365853658536586,
+ 4.553571428571429,
+ 4.541666666666667,
+ 4.529761904761905,
+ 4.535714285714286,
+ 4.579268292682927,
+ 4.406976744186046,
+ 4.5182926829268295,
+ 4.398809523809524,
+ 4.4939024390243905,
+ 4.523809523809524,
+ 4.402439024390244,
+ 4.465116279069767,
+ 4.369047619047619,
+ 4.523809523809524,
+ 4.458333333333333,
+ 4.505952380952381,
+ 4.440476190476191,
+ 4.593023255813954,
+ 4.453488372093023,
+ 4.604651162790698,
+ 4.27906976744186,
+ 4.51875,
+ 4.517441860465116,
+ 4.445121951219512,
+ 4.482558139534884,
+ 4.5,
+ 4.453488372093023,
+ 4.3604651162790695
+ ],
+ "dead_allies_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "dead_enemies_mean": [
+ 0.0,
+ 0.32608695652173914,
+ 0.34444444444444444,
+ 0.7443181818181818,
+ 0.9715909090909091,
+ 1.1647727272727273,
+ 1.3352272727272727,
+ 1.255813953488372,
+ 1.5731707317073171,
+ 1.5853658536585367,
+ 1.7378048780487805,
+ 1.6524390243902438,
+ 1.548780487804878,
+ 1.375,
+ 1.5755813953488371,
+ 1.7375,
+ 1.7865853658536586,
+ 1.95,
+ 1.5833333333333333,
+ 1.9573170731707317,
+ 2.0875,
+ 1.8475609756097562,
+ 2.018292682926829,
+ 2.011904761904762,
+ 1.8333333333333333,
+ 2.1011904761904763,
+ 2.268292682926829,
+ 2.0714285714285716,
+ 2.113095238095238,
+ 2.25,
+ 2.2621951219512195,
+ 2.2261904761904763,
+ 2.2083333333333335,
+ 2.1488095238095237,
+ 2.4207317073170733,
+ 2.113095238095238,
+ 2.2621951219512195,
+ 2.2261904761904763,
+ 2.2378048780487805,
+ 2.341463414634146,
+ 2.308139534883721,
+ 2.3255813953488373,
+ 2.2023809523809526,
+ 2.6375,
+ 2.302325581395349,
+ 2.369047619047619,
+ 2.2848837209302326,
+ 2.38953488372093,
+ 2.4226190476190474,
+ 2.2093023255813953,
+ 2.296511627906977,
+ 2.3806818181818183,
+ 2.1744186046511627,
+ 2.4186046511627906,
+ 2.5406976744186047,
+ 2.5833333333333335,
+ 2.4285714285714284,
+ 2.488372093023256,
+ 2.7267441860465116,
+ 2.4488636363636362,
+ 2.377906976744186,
+ 2.441860465116279,
+ 2.4302325581395348,
+ 2.25,
+ 2.2023809523809526,
+ 2.136904761904762,
+ 2.369047619047619,
+ 2.3273809523809526,
+ 2.2151162790697674,
+ 2.1444444444444444,
+ 2.2093023255813953,
+ 2.2732558139534884,
+ 2.4261363636363638,
+ 2.441860465116279,
+ 2.2674418604651163,
+ 2.372093023255814,
+ 2.3658536585365852,
+ 2.5238095238095237,
+ 2.2790697674418605,
+ 2.1875,
+ 2.4166666666666665,
+ 2.5406976744186047,
+ 2.5060975609756095,
+ 2.1802325581395348,
+ 2.3488372093023258,
+ 2.4886363636363638,
+ 2.3579545454545454,
+ 2.642857142857143,
+ 2.2790697674418605,
+ 2.511627906976744,
+ 2.352272727272727,
+ 2.357142857142857,
+ 2.524390243902439,
+ 2.4404761904761907,
+ 2.2906976744186047,
+ 2.4186046511627906,
+ 2.296511627906977,
+ 2.447674418604651,
+ 2.477272727272727,
+ 2.3488372093023258,
+ 2.546511627906977,
+ 2.5,
+ 2.3068181818181817,
+ 2.4186046511627906,
+ 2.5833333333333335,
+ 2.3011363636363638,
+ 2.86046511627907,
+ 2.227272727272727,
+ 2.1761363636363638,
+ 2.3920454545454546,
+ 2.4186046511627906,
+ 2.316666666666667,
+ 2.4261363636363638,
+ 2.338888888888889,
+ 2.1569767441860463,
+ 2.488372093023256,
+ 2.335227272727273,
+ 2.4943181818181817,
+ 2.3068181818181817,
+ 2.5511363636363638,
+ 2.4593023255813953,
+ 2.3920454545454546,
+ 2.5238095238095237,
+ 2.4186046511627906,
+ 2.5872093023255816,
+ 2.7738095238095237,
+ 2.768292682926829,
+ 2.4709302325581395,
+ 2.4166666666666665,
+ 2.686046511627907,
+ 2.4069767441860463,
+ 2.5930232558139537,
+ 2.5892857142857144,
+ 2.703488372093023,
+ 2.5697674418604652,
+ 2.528409090909091,
+ 2.522222222222222,
+ 2.5406976744186047,
+ 2.686046511627907,
+ 2.5,
+ 2.727272727272727,
+ 2.75,
+ 2.5238095238095237,
+ 2.8197674418604652,
+ 2.7674418604651163,
+ 2.6785714285714284,
+ 2.6845238095238093,
+ 2.7202380952380953,
+ 2.590909090909091,
+ 2.6964285714285716,
+ 2.6744186046511627,
+ 2.517857142857143,
+ 2.619047619047619,
+ 2.4055555555555554,
+ 2.7023809523809526,
+ 2.8988095238095237,
+ 2.511627906976744,
+ 2.7023809523809526,
+ 3.017857142857143,
+ 2.9127906976744184,
+ 2.866279069767442,
+ 2.883720930232558,
+ 2.7151162790697674,
+ 2.863095238095238,
+ 2.9285714285714284,
+ 2.682926829268293,
+ 2.7023809523809526,
+ 2.7151162790697674,
+ 2.7857142857142856,
+ 2.9642857142857144,
+ 2.6569767441860463,
+ 2.7443181818181817,
+ 2.6744186046511627,
+ 2.75,
+ 2.9523809523809526,
+ 2.6744186046511627,
+ 2.9186046511627906,
+ 2.808139534883721,
+ 2.8353658536585367,
+ 3.0714285714285716,
+ 2.886904761904762,
+ 2.9302325581395348,
+ 2.7151162790697674,
+ 2.8452380952380953,
+ 2.886904761904762,
+ 2.7804878048780486,
+ 2.863095238095238,
+ 2.6488095238095237,
+ 2.61046511627907,
+ 2.8658536585365852,
+ 2.627906976744186,
+ 2.8452380952380953,
+ 2.4244186046511627,
+ 2.7848837209302326,
+ 2.616279069767442,
+ 2.7202380952380953,
+ 2.6488095238095237,
+ 2.869047619047619,
+ 2.7083333333333335,
+ 2.7202380952380953,
+ 2.7083333333333335,
+ 2.6818181818181817,
+ 3.1036585365853657,
+ 2.8255813953488373,
+ 2.761904761904762,
+ 2.8372093023255816,
+ 2.7916666666666665,
+ 2.761627906976744,
+ 2.61046511627907,
+ 2.691860465116279,
+ 2.7797619047619047,
+ 2.8430232558139537,
+ 2.75,
+ 2.8902439024390243,
+ 2.6627906976744184,
+ 2.9226190476190474,
+ 2.9047619047619047,
+ 2.892857142857143,
+ 2.7857142857142856,
+ 2.798780487804878,
+ 2.880952380952381,
+ 3.0609756097560976,
+ 2.7261904761904763,
+ 2.863095238095238,
+ 2.8353658536585367,
+ 3.005952380952381,
+ 2.8333333333333335,
+ 2.8214285714285716,
+ 2.9939024390243905,
+ 2.7261904761904763,
+ 2.7083333333333335,
+ 2.875,
+ 2.642857142857143,
+ 2.869047619047619,
+ 2.880952380952381,
+ 2.633720930232558,
+ 2.63953488372093,
+ 2.6136363636363638,
+ 2.6363636363636362,
+ 2.7857142857142856,
+ 2.8333333333333335,
+ 2.869047619047619,
+ 2.9702380952380953,
+ 2.7976190476190474,
+ 2.6726190476190474,
+ 2.744186046511628,
+ 2.8333333333333335,
+ 2.9523809523809526,
+ 2.8536585365853657,
+ 2.6011904761904763,
+ 2.796511627906977,
+ 3.091463414634146,
+ 2.9642857142857144,
+ 3.0892857142857144,
+ 3.0060975609756095,
+ 2.93125,
+ 2.902439024390244,
+ 2.9464285714285716,
+ 2.841463414634146,
+ 2.7267441860465116,
+ 2.9107142857142856,
+ 3.0476190476190474,
+ 2.7976190476190474,
+ 2.8154761904761907,
+ 2.841463414634146,
+ 3.15,
+ 2.738095238095238,
+ 2.8902439024390243,
+ 2.5416666666666665,
+ 3.042682926829268,
+ 2.80625,
+ 2.774390243902439,
+ 2.9047619047619047,
+ 3.0,
+ 2.8597560975609757,
+ 2.732142857142857,
+ 2.792682926829268,
+ 2.8392857142857144,
+ 2.9166666666666665,
+ 3.0853658536585367,
+ 2.957317073170732,
+ 3.0535714285714284,
+ 2.7857142857142856,
+ 2.863095238095238,
+ 2.975,
+ 2.932926829268293,
+ 3.1341463414634148,
+ 3.019230769230769,
+ 2.975609756097561,
+ 2.8273809523809526,
+ 3.0304878048780486,
+ 3.018292682926829,
+ 3.018292682926829,
+ 3.1036585365853657,
+ 2.9464285714285716,
+ 2.908536585365854,
+ 3.073170731707317,
+ 2.886904761904762,
+ 2.880952380952381,
+ 2.9634146341463414,
+ 2.988095238095238,
+ 3.10625,
+ 2.9146341463414633,
+ 2.994047619047619,
+ 2.86046511627907,
+ 2.823170731707317,
+ 2.988095238095238,
+ 2.9634146341463414,
+ 3.011904761904762,
+ 2.796511627906977,
+ 3.0416666666666665,
+ 2.875,
+ 2.9047619047619047,
+ 2.9523809523809526,
+ 2.895348837209302,
+ 2.936046511627907,
+ 2.9127906976744184,
+ 3.0875,
+ 2.9107142857142856,
+ 3.042682926829268,
+ 2.869047619047619,
+ 2.6666666666666665,
+ 3.06875,
+ 2.7916666666666665,
+ 3.017857142857143,
+ 2.8780487804878048,
+ 3.1402439024390243,
+ 2.7325581395348837,
+ 2.877906976744186,
+ 3.1097560975609757,
+ 3.097560975609756,
+ 2.936046511627907,
+ 2.7848837209302326,
+ 2.994186046511628,
+ 2.8095238095238093,
+ 3.097560975609756,
+ 2.9464285714285716,
+ 3.0304878048780486,
+ 2.7325581395348837,
+ 2.7865853658536586,
+ 2.7916666666666665,
+ 2.744186046511628,
+ 2.9047619047619047,
+ 2.9702380952380953,
+ 3.0,
+ 2.8636363636363638,
+ 2.7790697674418605,
+ 2.951219512195122,
+ 2.941860465116279,
+ 2.8488372093023258,
+ 2.8048780487804876,
+ 2.926829268292683,
+ 3.225609756097561,
+ 2.9186046511627906,
+ 3.0357142857142856,
+ 2.88953488372093,
+ 2.9642857142857144,
+ 2.8035714285714284,
+ 3.0755813953488373,
+ 2.744047619047619,
+ 2.796511627906977,
+ 2.7916666666666665,
+ 2.9226190476190474,
+ 2.738095238095238,
+ 2.960227272727273,
+ 3.116279069767442,
+ 3.052325581395349,
+ 2.9127906976744184,
+ 2.9523809523809526,
+ 2.994047619047619,
+ 3.0654761904761907,
+ 3.0833333333333335,
+ 3.1097560975609757,
+ 2.9285714285714284,
+ 2.988095238095238,
+ 2.9047619047619047,
+ 2.9464285714285716,
+ 2.9939024390243905,
+ 2.953488372093023,
+ 2.932926829268293,
+ 3.0595238095238093,
+ 3.0548780487804876,
+ 2.875,
+ 3.1707317073170733,
+ 2.808139534883721,
+ 3.1011904761904763,
+ 2.9404761904761907,
+ 3.0297619047619047,
+ 3.0476190476190474,
+ 3.005952380952381,
+ 2.88953488372093,
+ 3.0290697674418605,
+ 2.7906976744186047,
+ 2.988372093023256,
+ 3.08125,
+ 2.895348837209302,
+ 3.0121951219512195,
+ 2.9767441860465116,
+ 2.941860465116279,
+ 2.9069767441860463,
+ 3.0930232558139537
+ ],
+ "dead_enemies_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "ep_length_mean": [
+ 48.5,
+ 55.32608695652174,
+ 55.855555555555554,
+ 57.22727272727273,
+ 57.81818181818182,
+ 57.9375,
+ 57.75568181818182,
+ 59.13953488372093,
+ 61.09756097560975,
+ 61.23170731707317,
+ 61.90243902439025,
+ 61.01829268292683,
+ 60.98170731707317,
+ 60.958333333333336,
+ 58.98837209302326,
+ 62.56875,
+ 61.926829268292686,
+ 63.26875,
+ 60.86309523809524,
+ 61.701219512195124,
+ 62.76875,
+ 61.603658536585364,
+ 61.19512195121951,
+ 60.291666666666664,
+ 59.82142857142857,
+ 59.55357142857143,
+ 61.1219512195122,
+ 59.857142857142854,
+ 60.625,
+ 61.03658536585366,
+ 62.292682926829265,
+ 60.48809523809524,
+ 60.38690476190476,
+ 60.517857142857146,
+ 61.701219512195124,
+ 59.63095238095238,
+ 61.97560975609756,
+ 60.726190476190474,
+ 60.99390243902439,
+ 61.01219512195122,
+ 59.156976744186046,
+ 58.656976744186046,
+ 60.273809523809526,
+ 62.5625,
+ 59.47674418604651,
+ 60.583333333333336,
+ 58.406976744186046,
+ 58.73255813953488,
+ 60.05952380952381,
+ 58.47093023255814,
+ 58.354651162790695,
+ 58.29545454545455,
+ 58.68023255813954,
+ 58.9593023255814,
+ 58.22674418604651,
+ 59.523809523809526,
+ 60.142857142857146,
+ 58.56395348837209,
+ 59.27325581395349,
+ 57.96590909090909,
+ 59.47093023255814,
+ 58.9593023255814,
+ 59.70348837209303,
+ 57.39772727272727,
+ 59.541666666666664,
+ 59.583333333333336,
+ 60.714285714285715,
+ 61.0,
+ 58.883720930232556,
+ 56.544444444444444,
+ 58.56395348837209,
+ 58.151162790697676,
+ 58.01136363636363,
+ 58.99418604651163,
+ 59.30232558139535,
+ 58.348837209302324,
+ 62.30487804878049,
+ 59.57738095238095,
+ 58.97093023255814,
+ 57.96022727272727,
+ 60.0,
+ 59.2093023255814,
+ 61.31707317073171,
+ 58.78488372093023,
+ 59.08720930232558,
+ 57.75568181818182,
+ 57.92613636363637,
+ 60.68452380952381,
+ 58.69767441860465,
+ 58.22093023255814,
+ 57.84090909090909,
+ 59.583333333333336,
+ 61.25,
+ 59.55952380952381,
+ 59.151162790697676,
+ 59.08720930232558,
+ 59.406976744186046,
+ 58.88953488372093,
+ 57.70454545454545,
+ 58.401162790697676,
+ 58.48255813953488,
+ 59.57142857142857,
+ 58.29545454545455,
+ 58.71511627906977,
+ 59.720238095238095,
+ 57.83522727272727,
+ 58.901162790697676,
+ 57.40909090909091,
+ 58.07954545454545,
+ 57.80681818181818,
+ 58.25581395348837,
+ 55.8,
+ 57.40909090909091,
+ 56.644444444444446,
+ 58.76162790697674,
+ 58.16279069767442,
+ 57.40340909090909,
+ 57.46590909090909,
+ 57.75,
+ 57.82386363636363,
+ 58.25581395348837,
+ 57.10795454545455,
+ 60.291666666666664,
+ 58.96511627906977,
+ 58.49418604651163,
+ 60.36309523809524,
+ 61.390243902439025,
+ 58.366279069767444,
+ 59.625,
+ 58.674418604651166,
+ 58.94186046511628,
+ 59.2093023255814,
+ 60.43452380952381,
+ 58.51744186046512,
+ 59.633720930232556,
+ 57.98863636363637,
+ 56.43333333333333,
+ 59.25581395348837,
+ 58.96511627906977,
+ 60.583333333333336,
+ 57.55113636363637,
+ 60.05357142857143,
+ 59.79761904761905,
+ 58.58139534883721,
+ 58.69186046511628,
+ 60.17261904761905,
+ 60.529761904761905,
+ 59.82738095238095,
+ 57.96590909090909,
+ 59.75595238095238,
+ 59.145348837209305,
+ 59.88095238095238,
+ 59.851190476190474,
+ 56.766666666666666,
+ 60.70238095238095,
+ 59.916666666666664,
+ 59.03488372093023,
+ 59.63690476190476,
+ 59.875,
+ 58.66279069767442,
+ 58.77325581395349,
+ 58.51744186046512,
+ 58.86046511627907,
+ 61.0,
+ 60.05952380952381,
+ 60.97560975609756,
+ 61.04761904761905,
+ 58.36046511627907,
+ 60.458333333333336,
+ 60.70238095238095,
+ 58.48837209302326,
+ 57.5,
+ 59.47093023255814,
+ 59.56547619047619,
+ 60.63690476190476,
+ 58.26162790697674,
+ 59.26744186046512,
+ 59.03488372093023,
+ 61.073170731707314,
+ 60.625,
+ 59.61309523809524,
+ 58.61046511627907,
+ 59.74418604651163,
+ 60.839285714285715,
+ 60.25,
+ 62.24390243902439,
+ 60.482142857142854,
+ 60.07142857142857,
+ 59.02906976744186,
+ 61.359756097560975,
+ 58.91860465116279,
+ 60.958333333333336,
+ 58.68023255813954,
+ 58.872093023255815,
+ 58.99418604651163,
+ 59.61904761904762,
+ 60.38095238095238,
+ 59.732142857142854,
+ 60.74404761904762,
+ 60.041666666666664,
+ 60.11904761904762,
+ 58.06818181818182,
+ 61.47560975609756,
+ 59.04651162790697,
+ 59.732142857142854,
+ 59.28488372093023,
+ 60.07738095238095,
+ 59.04651162790697,
+ 59.0,
+ 59.20348837209303,
+ 59.61904761904762,
+ 59.395348837209305,
+ 60.660714285714285,
+ 61.91463414634146,
+ 59.2093023255814,
+ 59.660714285714285,
+ 60.779761904761905,
+ 60.845238095238095,
+ 60.125,
+ 62.09146341463415,
+ 60.55952380952381,
+ 61.08536585365854,
+ 60.398809523809526,
+ 59.57738095238095,
+ 61.457317073170735,
+ 60.107142857142854,
+ 60.398809523809526,
+ 60.70238095238095,
+ 60.98170731707317,
+ 60.041666666666664,
+ 59.95238095238095,
+ 60.410714285714285,
+ 60.458333333333336,
+ 60.54761904761905,
+ 60.74404761904762,
+ 59.08139534883721,
+ 58.55813953488372,
+ 57.90340909090909,
+ 57.88068181818182,
+ 59.767857142857146,
+ 60.05952380952381,
+ 59.67261904761905,
+ 60.732142857142854,
+ 60.38095238095238,
+ 60.55952380952381,
+ 59.25581395348837,
+ 60.75595238095238,
+ 60.464285714285715,
+ 61.15243902439025,
+ 59.75595238095238,
+ 59.366279069767444,
+ 61.96951219512195,
+ 59.898809523809526,
+ 60.285714285714285,
+ 62.073170731707314,
+ 62.9375,
+ 61.701219512195124,
+ 60.089285714285715,
+ 61.201219512195124,
+ 58.81976744186046,
+ 60.17261904761905,
+ 60.541666666666664,
+ 60.43452380952381,
+ 59.88690476190476,
+ 61.426829268292686,
+ 63.35,
+ 60.660714285714285,
+ 62.09146341463415,
+ 60.089285714285715,
+ 61.798780487804876,
+ 62.625,
+ 61.707317073170735,
+ 60.595238095238095,
+ 61.40853658536585,
+ 61.44512195121951,
+ 60.36309523809524,
+ 61.3719512195122,
+ 60.339285714285715,
+ 60.273809523809526,
+ 62.146341463414636,
+ 62.4390243902439,
+ 60.732142857142854,
+ 61.00595238095238,
+ 60.38095238095238,
+ 62.83125,
+ 60.97560975609756,
+ 62.21951219512195,
+ 64.46153846153847,
+ 61.50609756097561,
+ 60.857142857142854,
+ 62.47560975609756,
+ 62.13414634146341,
+ 62.25,
+ 61.548780487804876,
+ 60.720238095238095,
+ 61.170731707317074,
+ 61.207317073170735,
+ 60.958333333333336,
+ 60.898809523809526,
+ 61.22560975609756,
+ 60.833333333333336,
+ 62.8,
+ 62.06707317073171,
+ 60.82142857142857,
+ 59.16860465116279,
+ 61.38414634146341,
+ 59.726190476190474,
+ 61.670731707317074,
+ 59.86904761904762,
+ 59.48837209302326,
+ 60.470238095238095,
+ 59.88690476190476,
+ 61.023809523809526,
+ 60.43452380952381,
+ 58.53488372093023,
+ 58.96511627906977,
+ 59.52325581395349,
+ 63.41875,
+ 59.94047619047619,
+ 62.396341463414636,
+ 60.839285714285715,
+ 60.398809523809526,
+ 62.61875,
+ 60.32738095238095,
+ 60.55952380952381,
+ 61.23780487804878,
+ 61.603658536585364,
+ 58.66860465116279,
+ 59.05813953488372,
+ 61.09756097560975,
+ 61.426829268292686,
+ 58.68023255813954,
+ 58.69186046511628,
+ 59.174418604651166,
+ 59.92261904761905,
+ 62.298780487804876,
+ 60.82142857142857,
+ 61.396341463414636,
+ 59.325581395348834,
+ 61.27439024390244,
+ 60.45238095238095,
+ 58.354651162790695,
+ 61.01190476190476,
+ 60.357142857142854,
+ 60.529761904761905,
+ 58.125,
+ 59.33720930232558,
+ 61.11585365853659,
+ 59.127906976744185,
+ 59.20348837209303,
+ 61.94512195121951,
+ 61.53048780487805,
+ 61.420731707317074,
+ 59.325581395348834,
+ 60.160714285714285,
+ 59.21511627906977,
+ 59.982142857142854,
+ 60.63095238095238,
+ 59.47093023255814,
+ 59.81547619047619,
+ 58.52906976744186,
+ 59.785714285714285,
+ 59.583333333333336,
+ 60.61904761904762,
+ 57.52272727272727,
+ 58.56976744186046,
+ 59.58139534883721,
+ 59.02906976744186,
+ 59.68452380952381,
+ 59.595238095238095,
+ 60.86309523809524,
+ 60.392857142857146,
+ 61.46341463414634,
+ 61.023809523809526,
+ 59.98809523809524,
+ 60.333333333333336,
+ 60.226190476190474,
+ 61.51829268292683,
+ 58.55232558139535,
+ 62.05487804878049,
+ 60.964285714285715,
+ 61.69512195121951,
+ 59.898809523809526,
+ 61.170731707317074,
+ 59.23255813953488,
+ 59.976190476190474,
+ 60.06547619047619,
+ 60.267857142857146,
+ 59.92857142857143,
+ 60.375,
+ 58.51744186046512,
+ 59.21511627906977,
+ 58.81395348837209,
+ 58.656976744186046,
+ 62.94375,
+ 59.19767441860465,
+ 61.05487804878049,
+ 58.145348837209305,
+ 59.06395348837209,
+ 58.58720930232558,
+ 59.174418604651166
+ ],
+ "ep_length_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "episode": [
+ 184,
+ 364,
+ 540,
+ 716,
+ 892,
+ 1068,
+ 1240,
+ 1404,
+ 1568,
+ 1732,
+ 1896,
+ 2064,
+ 2232,
+ 2404,
+ 2564,
+ 2728,
+ 2888,
+ 3056,
+ 3220,
+ 3380,
+ 3544,
+ 3708,
+ 3876,
+ 4044,
+ 4212,
+ 4376,
+ 4544,
+ 4712,
+ 4876,
+ 5040,
+ 5208,
+ 5376,
+ 5544,
+ 5708,
+ 5876,
+ 6040,
+ 6208,
+ 6372,
+ 6536,
+ 6708,
+ 6880,
+ 7048,
+ 7208,
+ 7380,
+ 7548,
+ 7720,
+ 7892,
+ 8060,
+ 8232,
+ 8404,
+ 8580,
+ 8752,
+ 8924,
+ 9096,
+ 9264,
+ 9432,
+ 9604,
+ 9776,
+ 9952,
+ 10124,
+ 10296,
+ 10468,
+ 10644,
+ 10812,
+ 10980,
+ 11148,
+ 11316,
+ 11488,
+ 11668,
+ 11840,
+ 12012,
+ 12188,
+ 12360,
+ 12532,
+ 12704,
+ 12868,
+ 13036,
+ 13208,
+ 13384,
+ 13552,
+ 13724,
+ 13888,
+ 14060,
+ 14232,
+ 14408,
+ 14584,
+ 14752,
+ 14924,
+ 15096,
+ 15272,
+ 15440,
+ 15604,
+ 15772,
+ 15944,
+ 16116,
+ 16288,
+ 16460,
+ 16636,
+ 16808,
+ 16980,
+ 17148,
+ 17324,
+ 17496,
+ 17664,
+ 17840,
+ 18012,
+ 18188,
+ 18364,
+ 18540,
+ 18712,
+ 18892,
+ 19068,
+ 19248,
+ 19420,
+ 19592,
+ 19768,
+ 19944,
+ 20120,
+ 20296,
+ 20468,
+ 20644,
+ 20812,
+ 20984,
+ 21156,
+ 21324,
+ 21488,
+ 21660,
+ 21828,
+ 22000,
+ 22172,
+ 22344,
+ 22512,
+ 22684,
+ 22856,
+ 23032,
+ 23212,
+ 23384,
+ 23556,
+ 23724,
+ 23900,
+ 24068,
+ 24236,
+ 24408,
+ 24580,
+ 24748,
+ 24916,
+ 25084,
+ 25260,
+ 25428,
+ 25600,
+ 25768,
+ 25936,
+ 26116,
+ 26284,
+ 26452,
+ 26624,
+ 26792,
+ 26960,
+ 27132,
+ 27304,
+ 27476,
+ 27648,
+ 27816,
+ 27984,
+ 28148,
+ 28316,
+ 28488,
+ 28656,
+ 28824,
+ 28996,
+ 29172,
+ 29344,
+ 29512,
+ 29680,
+ 29852,
+ 30024,
+ 30196,
+ 30360,
+ 30528,
+ 30696,
+ 30868,
+ 31040,
+ 31208,
+ 31376,
+ 31540,
+ 31708,
+ 31876,
+ 32048,
+ 32212,
+ 32384,
+ 32552,
+ 32724,
+ 32896,
+ 33068,
+ 33236,
+ 33404,
+ 33572,
+ 33740,
+ 33908,
+ 34076,
+ 34252,
+ 34416,
+ 34588,
+ 34756,
+ 34928,
+ 35096,
+ 35268,
+ 35440,
+ 35612,
+ 35780,
+ 35952,
+ 36120,
+ 36284,
+ 36456,
+ 36624,
+ 36792,
+ 36960,
+ 37128,
+ 37292,
+ 37460,
+ 37624,
+ 37792,
+ 37960,
+ 38124,
+ 38292,
+ 38460,
+ 38628,
+ 38792,
+ 38960,
+ 39128,
+ 39296,
+ 39464,
+ 39632,
+ 39800,
+ 39972,
+ 40144,
+ 40320,
+ 40496,
+ 40664,
+ 40832,
+ 41000,
+ 41168,
+ 41336,
+ 41504,
+ 41676,
+ 41844,
+ 42012,
+ 42176,
+ 42344,
+ 42516,
+ 42680,
+ 42848,
+ 43016,
+ 43180,
+ 43340,
+ 43504,
+ 43672,
+ 43836,
+ 44008,
+ 44176,
+ 44344,
+ 44512,
+ 44680,
+ 44844,
+ 45004,
+ 45172,
+ 45336,
+ 45504,
+ 45668,
+ 45828,
+ 45992,
+ 46160,
+ 46324,
+ 46488,
+ 46656,
+ 46820,
+ 46988,
+ 47156,
+ 47320,
+ 47484,
+ 47652,
+ 47820,
+ 47988,
+ 48148,
+ 48312,
+ 48476,
+ 48632,
+ 48796,
+ 48964,
+ 49128,
+ 49292,
+ 49456,
+ 49620,
+ 49788,
+ 49952,
+ 50116,
+ 50284,
+ 50452,
+ 50616,
+ 50784,
+ 50944,
+ 51108,
+ 51276,
+ 51448,
+ 51612,
+ 51780,
+ 51944,
+ 52112,
+ 52284,
+ 52452,
+ 52620,
+ 52788,
+ 52956,
+ 53128,
+ 53300,
+ 53472,
+ 53632,
+ 53800,
+ 53964,
+ 54132,
+ 54300,
+ 54460,
+ 54628,
+ 54796,
+ 54960,
+ 55124,
+ 55296,
+ 55468,
+ 55632,
+ 55796,
+ 55968,
+ 56140,
+ 56312,
+ 56480,
+ 56644,
+ 56812,
+ 56976,
+ 57148,
+ 57312,
+ 57480,
+ 57652,
+ 57820,
+ 57988,
+ 58156,
+ 58332,
+ 58504,
+ 58668,
+ 58840,
+ 59012,
+ 59176,
+ 59340,
+ 59504,
+ 59676,
+ 59844,
+ 60016,
+ 60184,
+ 60352,
+ 60524,
+ 60692,
+ 60864,
+ 61032,
+ 61200,
+ 61368,
+ 61544,
+ 61716,
+ 61888,
+ 62060,
+ 62228,
+ 62396,
+ 62564,
+ 62732,
+ 62896,
+ 63064,
+ 63232,
+ 63400,
+ 63568,
+ 63732,
+ 63904,
+ 64068,
+ 64236,
+ 64400,
+ 64568,
+ 64732,
+ 64904,
+ 65072,
+ 65240,
+ 65408,
+ 65576,
+ 65744,
+ 65916,
+ 66088,
+ 66260,
+ 66432,
+ 66592,
+ 66764,
+ 66928,
+ 67100,
+ 67272,
+ 67444,
+ 67616
+ ],
+ "episode_T": [
+ 10153,
+ 20212,
+ 30290,
+ 40485,
+ 50667,
+ 60793,
+ 70991,
+ 81000,
+ 91015,
+ 101185,
+ 111220,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "episode_in_buffer": [
+ 184,
+ 364,
+ 540,
+ 716,
+ 892,
+ 1068,
+ 1240,
+ 1404,
+ 1568,
+ 1732,
+ 1896,
+ 2064,
+ 2232,
+ 2404,
+ 2564,
+ 2728,
+ 2888,
+ 3056,
+ 3220,
+ 3380,
+ 3544,
+ 3708,
+ 3876,
+ 4044,
+ 4212,
+ 4376,
+ 4544,
+ 4712,
+ 4876,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000
+ ],
+ "episode_in_buffer_T": [
+ 10153,
+ 20212,
+ 30290,
+ 40485,
+ 50667,
+ 60793,
+ 70991,
+ 81000,
+ 91015,
+ 101185,
+ 111220,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "epsilon": [
+ 1.0,
+ 0.9035465,
+ 0.8079860000000001,
+ 0.712245,
+ 0.6153925,
+ 0.5186635,
+ 0.4224665000000001,
+ 0.3255855000000001,
+ 0.23050000000000015,
+ 0.13535750000000013,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05
+ ],
+ "epsilon_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "grad_norm_manager": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTk2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU5NjAwcQFhLgEAAAAAAAAAMRAARA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjE5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDYxOTA0cQFhLgEAAAAAAAAAIruARA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5Mjk0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI5NDg4cQFhLgEAAAAAAAAAerj5RA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODM4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjgzODg4cQFhLgEAAAAAAAAAVUgIRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTY0OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE2NDk2cQFhLgEAAAAAAAAAbZMrRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDQ2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ0NjU2cQFhLgEAAAAAAAAA5cRGRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTI0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTEyNDAwcQFhLgEAAAAAAAAA/Ft9RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MzQ4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTM0ODY0cQFhLgEAAAAAAAAAWrBkRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjQ2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI0Njg4cQFhLgEAAAAAAAAAOceCRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDYzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2MzIwcQFhLgEAAAAAAAAAYU+gRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzU0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc1NDA4cQFhLgEAAAAAAAAAWF2pRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDQyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA0MjA4cQFhLgEAAAAAAAAAwlWnRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTM2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODkzNjgwcQFhLgEAAAAAAAAAilLLRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTEwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjUxMDg4cQFhLgEAAAAAAAAARB/aRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTY1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk2NTYwcQFhLgEAAAAAAAAAlBHqRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDk0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA5NDg4cQFhLgEAAAAAAAAAykHvRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDkwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTA5MDQwcQFhLgEAAAAAAAAAt+n/RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTIyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODkyMjQwcQFhLgEAAAAAAAAAoMEFRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDkwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ5MDU2cQFhLgEAAAAAAAAAO3URRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjI5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTIyOTQ0cQFhLgEAAAAAAAAAUZ8ERg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDM4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQzODU2cQFhLgEAAAAAAAAAyZYrRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTQ2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk0NjQwcQFhLgEAAAAAAAAAUssYRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTI4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTEyODgwcQFhLgEAAAAAAAAARdgtRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDAxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAwMTEycQFhLgEAAAAAAAAA6cEvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDA0OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQwNDk2cQFhLgEAAAAAAAAAxLlDRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MDk5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDA5OTY4cQFhLgEAAAAAAAAABUwxRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDcyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ3MjE2cQFhLgEAAAAAAAAARpk+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjM5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIzOTg0cQFhLgEAAAAAAAAA0eQ2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjU5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjY1OTM2cQFhLgEAAAAAAAAAcpQlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjk0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY5NDU2cQFhLgEAAAAAAAAAOqs3Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDc1MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ3NTA0cQFhLgEAAAAAAAAArVBeRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTY5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU2OTEycQFhLgEAAAAAAAAAXTlMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjI0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIyNDE2cQFhLgEAAAAAAAAA5llnRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTA0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjEwNDQ4cQFhLgEAAAAAAAAABCRRRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTgxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk4MTkycQFhLgEAAAAAAAAAJyY9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzIzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjMyMzM2cQFhLgEAAAAAAAAAiwJrRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTc2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTU3Njk2cQFhLgEAAAAAAAAAiORMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODE5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgxOTM2cQFhLgEAAAAAAAAA815ORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjU5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY1OTM2cQFhLgEAAAAAAAAAA/N8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjYxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI2MTI4cQFhLgEAAAAAAAAAI7x9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzA0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTMwNDMycQFhLgEAAAAAAAAABpV7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjE1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYxNTIwcQFhLgEAAAAAAAAAMshyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDQzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA0MzM2cQFhLgEAAAAAAAAAk35mRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTMyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjkzMjMycQFhLgEAAAAAAAAA1DJlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjExMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIxMTA0cQFhLgEAAAAAAAAAMJ56Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTQ4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE0ODY0cQFhLgEAAAAAAAAA3f+HRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDM3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAzNzYwcQFhLgEAAAAAAAAAZIdlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTI0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUyNDE2cQFhLgEAAAAAAAAAO3iIRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjM4MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYzODI0cQFhLgEAAAAAAAAAFO52Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTg4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE4ODk2cQFhLgEAAAAAAAAAZM15Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODA3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgwNzg0cQFhLgEAAAAAAAAAdON0Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODEwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjgxMDA4cQFhLgEAAAAAAAAATTxeRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzMzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTczMzI4cQFhLgEAAAAAAAAA3i5+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzIxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzcyMTQ0cQFhLgEAAAAAAAAARWl5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODc4MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg3ODI0cQFhLgEAAAAAAAAAzeOBRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxOTE4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTkxODU2cQFhLgEAAAAAAAAAHO12Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDU3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ1Nzc2cQFhLgEAAAAAAAAAh7OMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTMyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUzMjY0cQFhLgEAAAAAAAAA4yKORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODEyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgxMjAwcQFhLgEAAAAAAAAAU3qjRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTg1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU4NTQ0cQFhLgEAAAAAAAAAmyqHRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTMyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkzMjAwcQFhLgEAAAAAAAAAAg2VRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTcyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTU3MjE2cQFhLgEAAAAAAAAAE3mbRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDY0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQ2NDQ4cQFhLgEAAAAAAAAAG4uLRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTA2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUwNjcycQFhLgEAAAAAAAAAEjiLRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjUyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI1MjMycQFhLgEAAAAAAAAAwZCCRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjU3MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI1NzEycQFhLgEAAAAAAAAAFeqPRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjc5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI3OTIwcQFhLgEAAAAAAAAACTaWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjMwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYzMDU2cQFhLgEAAAAAAAAAYWOORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzY5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc2OTQ0cQFhLgEAAAAAAAAAGXWZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzkwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM5MDU2cQFhLgEAAAAAAAAAgTGZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTg4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk4ODY0cQFhLgEAAAAAAAAA3G6zRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDAyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAwMjQwcQFhLgEAAAAAAAAAgimdRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTQ3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk0NzM2cQFhLgEAAAAAAAAA/6ykRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDcxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA3MTUycQFhLgEAAAAAAAAAVz6/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjkyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI5Mjk2cQFhLgEAAAAAAAAAS6e7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyODczNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjg3Mzc2cQFhLgEAAAAAAAAAmK+nRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTk2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU5NjAwcQFhLgEAAAAAAAAAVVGxRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjQ1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI0NTc2cQFhLgEAAAAAAAAAtXGpRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTU3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE1NzI4cQFhLgEAAAAAAAAAINm4Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzc3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc3NzQ0cQFhLgEAAAAAAAAAxF+xRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjAwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTYwMDE2cQFhLgEAAAAAAAAAnnC2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzE5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTcxOTIwcQFhLgEAAAAAAAAAH23DRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTQ1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTU0NTQ0cQFhLgEAAAAAAAAAXhyoRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTYxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU2MTQ0cQFhLgEAAAAAAAAAURusRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDA2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAwNjI0cQFhLgEAAAAAAAAAWmu/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzkzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM5MzQ0cQFhLgEAAAAAAAAAKFGyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDYyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2MjI0cQFhLgEAAAAAAAAA+iinRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mjg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI4ODgwcQFhLgEAAAAAAAAAUx+nRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyODA0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjgwNDY0cQFhLgEAAAAAAAAALy+zRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDg3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ4NzIwcQFhLgEAAAAAAAAAzjGgRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MDk1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDA5NTg0cQFhLgEAAAAAAAAAN7OYRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzU2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM1Njk2cQFhLgEAAAAAAAAAToW7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjYwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI2MDk2cQFhLgEAAAAAAAAAlJq7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDI4MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQyODAwcQFhLgEAAAAAAAAAtyC6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjk2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI5NjQ4cQFhLgEAAAAAAAAApa/HRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDk2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA5NjAwcQFhLgEAAAAAAAAAcAe6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTEwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkxMDg4cQFhLgEAAAAAAAAAAJi3Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTI5NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTEyOTc2cQFhLgEAAAAAAAAAIkjBRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDk4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA5ODg4cQFhLgEAAAAAAAAAg7OuRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDUyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ1Mjk2cQFhLgEAAAAAAAAAq3vYRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjMyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjYzMjgwcQFhLgEAAAAAAAAAhB3FRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTQ4MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU0ODAwcQFhLgEAAAAAAAAAioGmRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjE1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjIxNTg0cQFhLgEAAAAAAAAAA0K7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTY1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE2NTkycQFhLgEAAAAAAAAAivXHRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDI3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQyNzY4cQFhLgEAAAAAAAAAO57ERg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjY0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY2NDMycQFhLgEAAAAAAAAAyMrXRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDUxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ1MTA0cQFhLgEAAAAAAAAAJfnHRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MzA0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTMwNDQ4cQFhLgEAAAAAAAAAyLznRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzczMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM3MzI4cQFhLgEAAAAAAAAApyXNRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzE2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDcxNjAwcQFhLgEAAAAAAAAABXSmRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjc3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI3NzQ0cQFhLgEAAAAAAAAAJbPXRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDc5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ3OTIwcQFhLgEAAAAAAAAAzpbqRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzQzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc0MzUycQFhLgEAAAAAAAAA3PnMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzQ5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc0OTYwcQFhLgEAAAAAAAAAJSPZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjU3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI1Nzc2cQFhLgEAAAAAAAAANXi9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzkzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc5Mzc2cQFhLgEAAAAAAAAAr43QRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMjI0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjIyNDgwcQFhLgEAAAAAAAAAVCbvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTM2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjkzNjgwcQFhLgEAAAAAAAAAPZbxRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODIzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgyMzUycQFhLgEAAAAAAAAASfnkRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mzc2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM3NjE2cQFhLgEAAAAAAAAAsWTxRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTYzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk2MzY4cQFhLgEAAAAAAAAA2GXhRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTU2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE1NjMycQFhLgEAAAAAAAAAbEfWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTg3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU4NzM2cQFhLgEAAAAAAAAAEmzhRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNzU0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjc1NDcycQFhLgEAAAAAAAAAynvVRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTA2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjEwNjQwcQFhLgEAAAAAAAAAC5zkRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTY0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk2NDAwcQFhLgEAAAAAAAAANAvsRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTUxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk1MTg0cQFhLgEAAAAAAAAAKY7nRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzg1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc4NTEycQFhLgEAAAAAAAAAJFrSRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTMwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjUzMDA4cQFhLgEAAAAAAAAAQqT2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTkzMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU5MzEycQFhLgEAAAAAAAAA2KfrRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTI3MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUyNzA0cQFhLgEAAAAAAAAAu0sERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzY2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc2NjU2cQFhLgEAAAAAAAAAYMX8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTk5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU5OTIwcQFhLgEAAAAAAAAA3KDjRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mjg2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI4Njg4cQFhLgEAAAAAAAAAR7H0Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjkzNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY5MzYwcQFhLgEAAAAAAAAAv7XkRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDMxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQzMTg0cQFhLgEAAAAAAAAAHJ34Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMjQ2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjI0Njg4cQFhLgEAAAAAAAAAyfb8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njg0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY4NDMycQFhLgEAAAAAAAAAwoP7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzU3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc1NzkycQFhLgEAAAAAAAAAudLmRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTQzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk0MzIwcQFhLgEAAAAAAAAAqRMHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njg2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY4NjI0cQFhLgEAAAAAAAAAIUMNRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDcwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ3MDI0cQFhLgEAAAAAAAAAPqkORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDA1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAwNTkycQFhLgEAAAAAAAAAvR8LRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjkyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY5MjMycQFhLgEAAAAAAAAAO6gXRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTczNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE3MzYwcQFhLgEAAAAAAAAAtOL/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDI2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAyNjA4cQFhLgEAAAAAAAAA89sERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTA3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTUwNzY4cQFhLgEAAAAAAAAAqzDjRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjEzOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIxMzkycQFhLgEAAAAAAAAAVZ4JRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDkzMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA5MzEycQFhLgEAAAAAAAAAMf8JRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDY5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ2OTI4cQFhLgEAAAAAAAAAvSQJRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDU5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ1OTA0cQFhLgEAAAAAAAAA6mATRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMTI1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDEyNTI4cQFhLgEAAAAAAAAAr2YSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTU2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk1NjAwcQFhLgEAAAAAAAAAmqT1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTA3MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkwNzA0cQFhLgEAAAAAAAAAqwkFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjIwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIyMDMycQFhLgEAAAAAAAAA09LbRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mjg3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI4Nzg0cQFhLgEAAAAAAAAAq4cTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODQ2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg0NjI0cQFhLgEAAAAAAAAABBsnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTEwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUxMDU2cQFhLgEAAAAAAAAA7TcbRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTc0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU3NDg4cQFhLgEAAAAAAAAA6rceRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDYxNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ2MTYwcQFhLgEAAAAAAAAA7lIbRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTk3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjk5NzYwcQFhLgEAAAAAAAAA7yUORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzY4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM2ODY0cQFhLgEAAAAAAAAAt2QVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njk4NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY5ODcycQFhLgEAAAAAAAAAK8kcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MzI4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTMyODQ4cQFhLgEAAAAAAAAAQZT7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjg1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjY4NTYwcQFhLgEAAAAAAAAAh58oRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTEwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDExMDI0cQFhLgEAAAAAAAAAbu4cRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTU0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTU1NDA4cQFhLgEAAAAAAAAAMxEyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTY4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODk2ODQ4cQFhLgEAAAAAAAAA5ygRRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDYwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA2MDk2cQFhLgEAAAAAAAAAj+wbRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDczNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ3MzQ0cQFhLgEAAAAAAAAAdc8GRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTE0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDExNDA4cQFhLgEAAAAAAAAADy4TRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDQyNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA0MjcycQFhLgEAAAAAAAAAg1syRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDY4MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ2ODMycQFhLgEAAAAAAAAAF/kQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjY5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY2OTI4cQFhLgEAAAAAAAAAmBUnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5Mjg2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI4NjI0cQFhLgEAAAAAAAAAeUgCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTQ5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU0OTkycQFhLgEAAAAAAAAA0sUbRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTg5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU4OTI4cQFhLgEAAAAAAAAApwcPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjYwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY2MDAwcQFhLgEAAAAAAAAAwR4bRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjI3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYyNzY4cQFhLgEAAAAAAAAAz5s0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjA1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIwNTkycQFhLgEAAAAAAAAAQCkoRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjUyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY1MjY0cQFhLgEAAAAAAAAArj0gRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzIwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjcyMDgwcQFhLgEAAAAAAAAAosgYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTAzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTkwMzUycQFhLgEAAAAAAAAA7aggRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mzk3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM5NzI4cQFhLgEAAAAAAAAAuKsiRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjQ1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY0NTEycQFhLgEAAAAAAAAAwR4zRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzM0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDczNDI0cQFhLgEAAAAAAAAAWfgtRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODEyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgxMjY0cQFhLgEAAAAAAAAAxVkjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODc3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg3NzI4cQFhLgEAAAAAAAAAqRAeRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTM5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUzOTUycQFhLgEAAAAAAAAAkj40Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjE2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTIxNjAwcQFhLgEAAAAAAAAA39AuRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzI3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTcyNzg0cQFhLgEAAAAAAAAAgXknRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDA5NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQwOTc2cQFhLgEAAAAAAAAA7njtRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDM2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAzNjMycQFhLgEAAAAAAAAAjVEvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODU1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg1NTg0cQFhLgEAAAAAAAAAa50jRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODU2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg1NjE2cQFhLgEAAAAAAAAAEEw2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTgzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE4Mzg0cQFhLgEAAAAAAAAAR6IvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTQ0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk0NDQ4cQFhLgEAAAAAAAAACAIYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODE2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTgxNjgwcQFhLgEAAAAAAAAA2OsyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTAwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUwMDE2cQFhLgEAAAAAAAAATAQvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODM1MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgzNTA0cQFhLgEAAAAAAAAA1T89Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzIxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTcyMTEycQFhLgEAAAAAAAAA19ImRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjAwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYwMDgwcQFhLgEAAAAAAAAAE80dRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjk0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI5NDU2cQFhLgEAAAAAAAAAfl8/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODkxNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg5MTY4cQFhLgEAAAAAAAAAB7AXRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDIzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQyMzIwcQFhLgEAAAAAAAAAisUdRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzMxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTczMTA0cQFhLgEAAAAAAAAAA00/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDcwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ3MDQwcQFhLgEAAAAAAAAAE18RRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTk2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE5NjMycQFhLgEAAAAAAAAAWK8fRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDM5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQzOTUycQFhLgEAAAAAAAAAI3khRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzg0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM4NDAwcQFhLgEAAAAAAAAAGQMwRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjg2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI4Njg4cQFhLgEAAAAAAAAAr1YtRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTY3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE2NzUycQFhLgEAAAAAAAAAwFohRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzkwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM5MDU2cQFhLgEAAAAAAAAAJNsuRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDA2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQwNjU2cQFhLgEAAAAAAAAA+wEnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjI4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYyODY0cQFhLgEAAAAAAAAAvBgkRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDEyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAxMjMycQFhLgEAAAAAAAAAm2ggRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjAyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTYwMjg4cQFhLgEAAAAAAAAAqU01Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzYwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc2MDgwcQFhLgEAAAAAAAAALdzwRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MDkyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDA5MjAwcQFhLgEAAAAAAAAAmPciRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzYxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc2MTc2cQFhLgEAAAAAAAAAgkUmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDY2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2NjA4cQFhLgEAAAAAAAAA4KUcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjIxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYyMTkycQFhLgEAAAAAAAAA8RYwRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTAxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTkwMTkycQFhLgEAAAAAAAAAhzwvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzczOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTc3MzkycQFhLgEAAAAAAAAA3T0nRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTkwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU5MDI0cQFhLgEAAAAAAAAArVcyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTM4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjkzODA4cQFhLgEAAAAAAAAAxckmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTg1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE4NTEycQFhLgEAAAAAAAAABZM7Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDE1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTAxNTM2cQFhLgEAAAAAAAAApbs3Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTE3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUxNzQ0cQFhLgEAAAAAAAAABMw9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDA1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQwNTkycQFhLgEAAAAAAAAAgAk+Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjE5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYxOTA0cQFhLgEAAAAAAAAAjk8rRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTc5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU3OTY4cQFhLgEAAAAAAAAACTczRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTMyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTEzMjQ4cQFhLgEAAAAAAAAAbaMPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTk2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE5NjY0cQFhLgEAAAAAAAAApbonRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTgxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE4MTI4cQFhLgEAAAAAAAAAx6oYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjQ4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY0ODE2cQFhLgEAAAAAAAAAnn0VRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODY1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg2NTc2cQFhLgEAAAAAAAAAHkEhRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTgwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk4MDAwcQFhLgEAAAAAAAAA5HAoRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTI3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTUyNzg0cQFhLgEAAAAAAAAAAWMvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTczOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU3MzkycQFhLgEAAAAAAAAAI7M1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTQwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk0MDMycQFhLgEAAAAAAAAAyvg3Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTEyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjkxMjgwcQFhLgEAAAAAAAAADlspRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjIyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIyMjU2cQFhLgEAAAAAAAAAaQ8wRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDc5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQ3OTg0cQFhLgEAAAAAAAAAAQc6Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDgxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ4MTEycQFhLgEAAAAAAAAACVkyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTU5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk1OTUycQFhLgEAAAAAAAAAljY0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzEwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjMxMDg4cQFhLgEAAAAAAAAA5dgrRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDg5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ4OTQ0cQFhLgEAAAAAAAAA4JxGRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzU2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc1NjMycQFhLgEAAAAAAAAACx4vRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzQ4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc0ODY0cQFhLgEAAAAAAAAAVpo8Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzcxMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM3MTM2cQFhLgEAAAAAAAAAec9DRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTc0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk3NDI0cQFhLgEAAAAAAAAABi5CRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjA0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTYwNDgwcQFhLgEAAAAAAAAApNgpRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMzgwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjM4MDMycQFhLgEAAAAAAAAAru5CRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTM3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUzNzQ0cQFhLgEAAAAAAAAAckdARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzk5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc5OTIwcQFhLgEAAAAAAAAA12QdRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDk4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA5ODQwcQFhLgEAAAAAAAAAI7k1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzI3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTcyNzUycQFhLgEAAAAAAAAA441ARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5ODQ0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTg0NDAwcQFhLgEAAAAAAAAA8gFHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDIzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAyMzUycQFhLgEAAAAAAAAAf6paRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTg4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE4ODk2cQFhLgEAAAAAAAAA3HsxRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjM5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDYzOTIwcQFhLgEAAAAAAAAAxVUiRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjQzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI0MzY4cQFhLgEAAAAAAAAAPbwyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODM0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjgzNDA4cQFhLgEAAAAAAAAAkklRRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTQ5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk0OTYwcQFhLgEAAAAAAAAAG6I4Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNzc3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjc3Nzc2cQFhLgEAAAAAAAAAZdxHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzgxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc4MTI4cQFhLgEAAAAAAAAA39QwRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5Nzk1MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTc5NTA0cQFhLgEAAAAAAAAA0uJARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjg2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTY4NjI0cQFhLgEAAAAAAAAAtr1ERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMjYxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjI2MTI4cQFhLgEAAAAAAAAAQPpSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDA3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAwNzIwcQFhLgEAAAAAAAAAf08sRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjgyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI4MjA4cQFhLgEAAAAAAAAAoOxFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODA5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTgwOTEycQFhLgEAAAAAAAAAMN5CRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTI4MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUyODAwcQFhLgEAAAAAAAAAqOgnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDMyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAzMjgwcQFhLgEAAAAAAAAAdA5ORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTQwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTE0MDE2cQFhLgEAAAAAAAAAGJVARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjAwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTYwMDk2cQFhLgEAAAAAAAAAycZDRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODAyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgwMjQwcQFhLgEAAAAAAAAASwwwRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzY4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM2ODQ4cQFhLgEAAAAAAAAAHXMmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDExNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQxMTY4cQFhLgEAAAAAAAAAAFEoRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjcwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI3MDcycQFhLgEAAAAAAAAALu1JRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzEyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTMxMjAwcQFhLgEAAAAAAAAA8NUyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzYzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM2Mzg0cQFhLgEAAAAAAAAAa8hMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDIzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAyMzIwcQFhLgEAAAAAAAAAtI8zRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDUxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA1MTA0cQFhLgEAAAAAAAAAzaFLRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODQwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg0MDgwcQFhLgEAAAAAAAAAOn9KRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzcyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc3MjY0cQFhLgEAAAAAAAAA6zJARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTIxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUyMTEycQFhLgEAAAAAAAAADtI1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDQyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ0MjA4cQFhLgEAAAAAAAAA+RYyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDQ4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ0ODgwcQFhLgEAAAAAAAAAb8U7Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDY3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ2NzY4cQFhLgEAAAAAAAAAKfo4Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzAzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTMwMzM2cQFhLgEAAAAAAAAAe/pCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzQ3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM0NzM2cQFhLgEAAAAAAAAAqj82Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTY1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODk2NTYwcQFhLgEAAAAAAAAA9DpARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDEwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQxMDcycQFhLgEAAAAAAAAAuQdBRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzExMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjcxMTIwcQFhLgEAAAAAAAAABq06Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDI0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAyNDgwcQFhLgEAAAAAAAAATZ5KRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTc2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk3NjQ4cQFhLgEAAAAAAAAAsB5MRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjUxNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTY1MTY4cQFhLgEAAAAAAAAArUxHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMwOTg4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMDk4ODQ4cQFhLgEAAAAAAAAAkmFZRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDUyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ1MjMycQFhLgEAAAAAAAAAdTExRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDYzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ2Mzg0cQFhLgEAAAAAAAAA4btTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDA2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAwNjU2cQFhLgEAAAAAAAAAk5hIRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTU4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk1ODU2cQFhLgEAAAAAAAAAwpxLRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MDc4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDA3ODU2cQFhLgEAAAAAAAAAdHJURw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMzk5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzM5OTg0cQFhLgEAAAAAAAAAP3VQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzkwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM5MDcycQFhLgEAAAAAAAAAhSFMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Nzg1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc4NTEycQFhLgEAAAAAAAAAAsVERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTk4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU5ODg4cQFhLgEAAAAAAAAAic5eRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODU5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg1OTA0cQFhLgEAAAAAAAAAWF47Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjYzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI2MzUycQFhLgEAAAAAAAAAYvBSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTM4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUzODQwcQFhLgEAAAAAAAAAr0RFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTEyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTExMjMycQFhLgEAAAAAAAAA2eY+Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDE1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQxNTg0cQFhLgEAAAAAAAAASYhKRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjUzNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY1MzYwcQFhLgEAAAAAAAAAjQI/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTgyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk4Mjg4cQFhLgEAAAAAAAAA7R1WRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzQ0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc0NDgwcQFhLgEAAAAAAAAAzQIxRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTUxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU1MTg0cQFhLgEAAAAAAAAA+QhkRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDYzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA2MzM2cQFhLgEAAAAAAAAAILQzRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTA2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUwNjg4cQFhLgEAAAAAAAAAeU9aRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDgzMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ4MzA0cQFhLgEAAAAAAAAAxf9PRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzI4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTcyODE2cQFhLgEAAAAAAAAAhh9gRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODkzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg5MzI4cQFhLgEAAAAAAAAAfuBURw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTI3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUyNzg0cQFhLgEAAAAAAAAAt01ORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODI1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgyNTQ0cQFhLgEAAAAAAAAAn2c+Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzg2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc4NjcycQFhLgEAAAAAAAAACTQ9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTc2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk3NjE2cQFhLgEAAAAAAAAAicpLRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDQwNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA0MDQ4cQFhLgEAAAAAAAAA7mZDRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjY0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY2NDgwcQFhLgEAAAAAAAAAloZRRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njk2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY5NjgwcQFhLgEAAAAAAAAAe6ZbRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI4ODgwcQFhLgEAAAAAAAAA3hlPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzM2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjMzNjgwcQFhLgEAAAAAAAAA+t9LRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjUzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY1Mzc2cQFhLgEAAAAAAAAA/SpNRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzg2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM4NjcycQFhLgEAAAAAAAAA2hxNRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTEwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTExMDQwcQFhLgEAAAAAAAAAubZJRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4NzQ4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODc0ODk2cQFhLgEAAAAAAAAAlrpaRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjQwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjY0MDE2cQFhLgEAAAAAAAAA9dxSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDkyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ5Mjk2cQFhLgEAAAAAAAAAW6VRRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NjExMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTYxMTA0cQFhLgEAAAAAAAAA1LY0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NDIzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTQyMzg0cQFhLgEAAAAAAAAAjtFTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTIyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjUyMjQwcQFhLgEAAAAAAAAAEzBfRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5Njc3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY3NzkycQFhLgEAAAAAAAAAL95HRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDI4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAyODk2cQFhLgEAAAAAAAAAs+RVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mzg3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM4NzY4cQFhLgEAAAAAAAAATGg8Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4ODgyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODg4MjQwcQFhLgEAAAAAAAAAQGxPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NTQ1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTU0NTc2cQFhLgEAAAAAAAAAD90/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTQ0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE0NDgwcQFhLgEAAAAAAAAA0yxTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzE3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjcxNzkycQFhLgEAAAAAAAAAZW5ORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDIwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAyMDk2cQFhLgEAAAAAAAAASBlVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTUzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE1MzQ0cQFhLgEAAAAAAAAAQ1FIRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzA2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTcwNjA4cQFhLgEAAAAAAAAAr0ZBRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzAwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDcwMDY0cQFhLgEAAAAAAAAASuM8Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTk1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTU5NTM2cQFhLgEAAAAAAAAAWHFWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTIzMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUyMzA0cQFhLgEAAAAAAAAA6II9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTUzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk1MzQ0cQFhLgEAAAAAAAAAzHBWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODk4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg5ODA4cQFhLgEAAAAAAAAAXIlGRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjQ5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjY0OTEycQFhLgEAAAAAAAAADtpARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTU2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk1Njk2cQFhLgEAAAAAAAAAel5JRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMzg3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzM4NzM2cQFhLgEAAAAAAAAAJmtWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjUyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY1MjAwcQFhLgEAAAAAAAAA3utQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMjA2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjIwNjU2cQFhLgEAAAAAAAAAnw1KRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDExNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAxMTY4cQFhLgEAAAAAAAAAtiRLRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzQ5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM0OTI4cQFhLgEAAAAAAAAA56xYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTI3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzEyNzIwcQFhLgEAAAAAAAAA7JFiRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MDg5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDA4OTEycQFhLgEAAAAAAAAAEdRNRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzY1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc2NTkycQFhLgEAAAAAAAAAQZpWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTIwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDEyMDgwcQFhLgEAAAAAAAAALNVVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTk1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU5NTM2cQFhLgEAAAAAAAAAEjpaRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDA2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAwNjg4cQFhLgEAAAAAAAAAx/pVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjc4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI3ODg4cQFhLgEAAAAAAAAAHQpHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzgwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc4MDk2cQFhLgEAAAAAAAAAqHxMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjMxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIzMTIwcQFhLgEAAAAAAAAAW+RHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDI2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQyNjQwcQFhLgEAAAAAAAAAJ8FgRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTQxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU0MTI4cQFhLgEAAAAAAAAA5UFcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4NTAwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODUwMDMycQFhLgEAAAAAAAAAXJ5rRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjM4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjIzODg4cQFhLgEAAAAAAAAAsHNORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5MDU1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTA1NTIwcQFhLgEAAAAAAAAAqCVZRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTUwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk1MDU2cQFhLgEAAAAAAAAA64NaRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDYwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ2MDAwcQFhLgEAAAAAAAAAqb5fRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTg1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjk4NTEycQFhLgEAAAAAAAAAoqFRRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODM0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgzNDcycQFhLgEAAAAAAAAA5ERMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4ODkwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODg5MDcycQFhLgEAAAAAAAAAw1RPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTk3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk5NzkycQFhLgEAAAAAAAAAyaxlRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjEwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIxMDcycQFhLgEAAAAAAAAALNRLRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDkxNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ5MTY4cQFhLgEAAAAAAAAA9kNmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NDM5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTQzOTIwcQFhLgEAAAAAAAAA/chcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjg3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY4Nzg0cQFhLgEAAAAAAAAAhiZ+Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzAwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMwMDMycQFhLgEAAAAAAAAA6YtCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODk3NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg5NzQ0cQFhLgEAAAAAAAAANPRHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTM1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUzNTUycQFhLgEAAAAAAAAA23ZTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDA0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQwNDMycQFhLgEAAAAAAAAAWho9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjQxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjY0MTQ0cQFhLgEAAAAAAAAAXIZjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTM0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjUzNDg4cQFhLgEAAAAAAAAAdJJYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNTAwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzUwMDY0cQFhLgEAAAAAAAAAW9Q3Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDY5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA2OTYwcQFhLgEAAAAAAAAAdmBmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTM4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjEzODA4cQFhLgEAAAAAAAAAQcpZRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODMxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgzMTg0cQFhLgEAAAAAAAAAMqxNRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjY3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI2NzM2cQFhLgEAAAAAAAAApPlERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDQ2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ0NjU2cQFhLgEAAAAAAAAArR96Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg4ODgwcQFhLgEAAAAAAAAAKvZqRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_manager_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "grad_norm_worker": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTY3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU2NzIwcQFhLgEAAAAAAAAANTlMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTQyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU0MjI0cQFhLgEAAAAAAAAAxp0PRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTM5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTEzOTM2cQFhLgEAAAAAAAAATG0aRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODI0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjgyNDQ4cQFhLgEAAAAAAAAAUgy7RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTQyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE0Mjg4cQFhLgEAAAAAAAAAMGrkRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDg5NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ4OTc2cQFhLgEAAAAAAAAAWeq1RA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDMxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAzMTg0cQFhLgEAAAAAAAAAUsW5RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjgzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI4MzM2cQFhLgEAAAAAAAAAO0DhRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTg5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTE4OTI4cQFhLgEAAAAAAAAAH332RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDgwNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA4MDQ4cQFhLgEAAAAAAAAAj23URQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDEwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAxMDcycQFhLgEAAAAAAAAABLTlRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2NTEycQFhLgEAAAAAAAAAEvPGRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMTIyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDEyMjQwcQFhLgEAAAAAAAAAIS20Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTAxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjUwMTI4cQFhLgEAAAAAAAAAZ/7YRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODYzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjg2Mzg0cQFhLgEAAAAAAAAAmqKQRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDM1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAzNTM2cQFhLgEAAAAAAAAAXJeURg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDU1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTA1NTg0cQFhLgEAAAAAAAAABqnKRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTM5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODkzOTY4cQFhLgEAAAAAAAAAD46uRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDI5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQyOTEycQFhLgEAAAAAAAAAN5GoRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjY2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI2Njg4cQFhLgEAAAAAAAAAD3jgRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzc4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM3ODA4cQFhLgEAAAAAAAAAiBk9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODQ1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjg0NTYwcQFhLgEAAAAAAAAAeyOLRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDgxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTA4MTc2cQFhLgEAAAAAAAAAbkhTRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTg0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk4NDgwcQFhLgEAAAAAAAAA80yzRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDA2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQwNjg4cQFhLgEAAAAAAAAAC/WNRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTI5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDEyOTQ0cQFhLgEAAAAAAAAAxVULRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDY3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ2NzM2cQFhLgEAAAAAAAAAx/pwRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjY2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI2NjcycQFhLgEAAAAAAAAAcyLqRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTUzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU1Mzc2cQFhLgEAAAAAAAAAylNsRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjc1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY3NTM2cQFhLgEAAAAAAAAAKzDJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDcwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ3MDI0cQFhLgEAAAAAAAAAbP4RRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTYzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU2MzM2cQFhLgEAAAAAAAAAXhEHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjE4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIxODQwcQFhLgEAAAAAAAAAM1KMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTEyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjExMjE2cQFhLgEAAAAAAAAA5n/ZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTc3MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk3NzEycQFhLgEAAAAAAAAA/L3URg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzEzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjMxMzc2cQFhLgEAAAAAAAAAU8DKRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTUwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTU1MDA4cQFhLgEAAAAAAAAAFjIPRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzkxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc5MTUycQFhLgEAAAAAAAAADGPSRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njk0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY5NDg4cQFhLgEAAAAAAAAAXkSaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjQ3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTI0Nzg0cQFhLgEAAAAAAAAAhkbpRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzQ0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM0NDY0cQFhLgEAAAAAAAAAyzoRRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjQ0OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTY0NDk2cQFhLgEAAAAAAAAAkowMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTYxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk2MTc2cQFhLgEAAAAAAAAAhZ+LRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyODk0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjg5NDg4cQFhLgEAAAAAAAAAChWzRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTU0NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE1NDQwcQFhLgEAAAAAAAAACEDBRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTc1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE3NTUycQFhLgEAAAAAAAAAM7UORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4ODg2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODg4Njg4cQFhLgEAAAAAAAAAEeIuRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTIzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUyMzIwcQFhLgEAAAAAAAAARLy1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjAxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYwMTc2cQFhLgEAAAAAAAAAOyGlRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjA0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIwNDMycQFhLgEAAAAAAAAAQBciRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODI0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgyNDE2cQFhLgEAAAAAAAAANVkqRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzQwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc0MDk2cQFhLgEAAAAAAAAAYd6vRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTY2NTEycQFhLgEAAAAAAAAAvvDXRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzUyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc1MjE2cQFhLgEAAAAAAAAAGymmRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODkxNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg5MTY4cQFhLgEAAAAAAAAA4eg/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxOTIxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTkyMTQ0cQFhLgEAAAAAAAAA3ylARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzY0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM2NDY0cQFhLgEAAAAAAAAA/4eoRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTA0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUwNDgwcQFhLgEAAAAAAAAA4ql+Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Nzk5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc5OTUycQFhLgEAAAAAAAAAvGM2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTY1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU2NTI4cQFhLgEAAAAAAAAA9dKQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTIyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkyMjQwcQFhLgEAAAAAAAAAPPl6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDg3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ4NzY4cQFhLgEAAAAAAAAA6SwZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDUzOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQ1MzkycQFhLgEAAAAAAAAAIBjORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzQ3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM0NzM2cQFhLgEAAAAAAAAAsU5KRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjYyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI2Mjg4cQFhLgEAAAAAAAAAeGktRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjU2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI1NjE2cQFhLgEAAAAAAAAAt5tHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjgxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI4MTEycQFhLgEAAAAAAAAAK9+IRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjE4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYxODA4cQFhLgEAAAAAAAAAi6C0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzM1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzczNTg0cQFhLgEAAAAAAAAAkoN1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzk2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM5NjMycQFhLgEAAAAAAAAAZNJTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTUyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk1MjE2cQFhLgEAAAAAAAAAhvHDRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTg5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk4OTkycQFhLgEAAAAAAAAAipNtRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTQ0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk0NDQ4cQFhLgEAAAAAAAAABUH8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDUyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA1MjMycQFhLgEAAAAAAAAAEpS4Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MTY0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTE2NDMycQFhLgEAAAAAAAAA1/M6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyODg3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjg4NzIwcQFhLgEAAAAAAAAATkpURw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTkyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU5MjE2cQFhLgEAAAAAAAAAKsdjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjMxMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTIzMTM2cQFhLgEAAAAAAAAA6muYRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTc2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE3NjQ4cQFhLgEAAAAAAAAAaggCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTk3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU5NzkycQFhLgEAAAAAAAAA7lhERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDk5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ5OTM2cQFhLgEAAAAAAAAA92lORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjI0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTYyNDE2cQFhLgEAAAAAAAAAAPiBRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDkwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ5MDcycQFhLgEAAAAAAAAAjpjMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTQ1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU0NTEycQFhLgEAAAAAAAAADp7cRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5ODgxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTg4MTQ0cQFhLgEAAAAAAAAAQve+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzYzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM2MzY4cQFhLgEAAAAAAAAATQE5Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDYwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2MDMycQFhLgEAAAAAAAAA9XKARg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzMyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMzMjk2cQFhLgEAAAAAAAAAu75sRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNzc0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjc3NDg4cQFhLgEAAAAAAAAAI/44Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ2NTEycQFhLgEAAAAAAAAAN7u3RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTExMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDExMTIwcQFhLgEAAAAAAAAA1P4RRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDEyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQxMjY0cQFhLgEAAAAAAAAAa4/IRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjUzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI1MzI4cQFhLgEAAAAAAAAA2DPyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDQyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ0MjQwcQFhLgEAAAAAAAAAOPAHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjU1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI1NTIwcQFhLgEAAAAAAAAAfEk2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDczOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA3MzkycQFhLgEAAAAAAAAAVEJ2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODU0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg1NDI0cQFhLgEAAAAAAAAArzbgRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDMwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTAzMDg4cQFhLgEAAAAAAAAAXeeoRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDcyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA3MjAwcQFhLgEAAAAAAAAAJ0sKRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDUzOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ1MzkycQFhLgEAAAAAAAAADTJFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTc2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU3NjE2cQFhLgEAAAAAAAAAkgkTRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDkwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQ5MDQwcQFhLgEAAAAAAAAAbDz8RQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjQ4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI0ODQ4cQFhLgEAAAAAAAAA/HheRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTY0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE2NDAwcQFhLgEAAAAAAAAAAmAPRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDAzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQwMzY4cQFhLgEAAAAAAAAATRzgRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjU4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY1ODU2cQFhLgEAAAAAAAAAVTtqRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDYzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ2MzUycQFhLgEAAAAAAAAAn++mRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDY2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTA2NjQwcQFhLgEAAAAAAAAAmRpvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzgxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM4MTkycQFhLgEAAAAAAAAAGhFfRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzI2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDcyNjU2cQFhLgEAAAAAAAAAQL4XRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjU2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI1NjMycQFhLgEAAAAAAAAAVCRHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMzk5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjM5OTUycQFhLgEAAAAAAAAA9TEyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzI0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzcyNDMycQFhLgEAAAAAAAAAS1I1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzgwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc4MDMycQFhLgEAAAAAAAAAqYuIRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTc3MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE3NzEycQFhLgEAAAAAAAAA3N43Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODA1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTgwNTI4cQFhLgEAAAAAAAAAqMwSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMTkxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjE5MTIwcQFhLgEAAAAAAAAAX/IiRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODczNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjg3MzQ0cQFhLgEAAAAAAAAA060kRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODEyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgxMjk2cQFhLgEAAAAAAAAA9g5BRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzY2NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM2NjU2cQFhLgEAAAAAAAAA9E5/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODcyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjg3MjQ4cQFhLgEAAAAAAAAAljkFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTk3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE5NzYwcQFhLgEAAAAAAAAAN7DNRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjAxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDYwMTc2cQFhLgEAAAAAAAAACadrRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjc4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjY3ODg4cQFhLgEAAAAAAAAAo2cnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDg5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA4OTEycQFhLgEAAAAAAAAAjtmeRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTg2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk4NjA4cQFhLgEAAAAAAAAAAr0cRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODc3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg3NzkycQFhLgEAAAAAAAAA0QfpRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzE2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjcxNjk2cQFhLgEAAAAAAAAAkE/9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ4ODgwcQFhLgEAAAAAAAAAt9yvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTkwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU5MDI0cQFhLgEAAAAAAAAAVk0uRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDUzMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ1MzEycQFhLgEAAAAAAAAADqcSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzI4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzcyODE2cQFhLgEAAAAAAAAANOzfRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjQ4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjY0ODE2cQFhLgEAAAAAAAAAkWBvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzA3MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMwNzA0cQFhLgEAAAAAAAAArv49Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjYzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY2Mzg0cQFhLgEAAAAAAAAA+ranRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDMyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQzMjgwcQFhLgEAAAAAAAAAatrLRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDc0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ3NDcycQFhLgEAAAAAAAAA25QSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjkyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY5MjAwcQFhLgEAAAAAAAAAVlVSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzU2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc1Njk2cQFhLgEAAAAAAAAAilDTRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTMyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTkzMjY0cQFhLgEAAAAAAAAAoViSRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY2NTEycQFhLgEAAAAAAAAAM1NHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDQ3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ0NzIwcQFhLgEAAAAAAAAAsWszRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MDYyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTA2MjU2cQFhLgEAAAAAAAAAfLp3Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzMwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTczMDcycQFhLgEAAAAAAAAAiFFFRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTcyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE3MjY0cQFhLgEAAAAAAAAAnw8qRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDQ1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA0NTI4cQFhLgEAAAAAAAAAux1MRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDkyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ5MjMycQFhLgEAAAAAAAAAymo/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjEyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIxMjAwcQFhLgEAAAAAAAAAUqK8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDg0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA4NDQ4cQFhLgEAAAAAAAAAVCOrRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTA5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUwOTYwcQFhLgEAAAAAAAAA6qnyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDM2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQzNjk2cQFhLgEAAAAAAAAAPjcjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMTI0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDEyNDMycQFhLgEAAAAAAAAAcgkwRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTk4MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk5ODI0cQFhLgEAAAAAAAAAcYxaRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODkwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg5MDcycQFhLgEAAAAAAAAANN3oRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTMyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzEzMjk2cQFhLgEAAAAAAAAA5OXoRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mjk0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI5NDU2cQFhLgEAAAAAAAAAvhEJRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODY3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg2NzM2cQFhLgEAAAAAAAAAOrM2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDQ1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQ0NTI4cQFhLgEAAAAAAAAAyoxQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTM0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDUzNDU2cQFhLgEAAAAAAAAALY4eRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDMzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQzMzc2cQFhLgEAAAAAAAAAHHAmRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTc2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjk3NjQ4cQFhLgEAAAAAAAAAKAfkRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzMwMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTMzMDI0cQFhLgEAAAAAAAAA2yo4Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njc3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY3NzYwcQFhLgEAAAAAAAAA+RkHRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MjE1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTIxNTIwcQFhLgEAAAAAAAAAeuIrRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTkwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU5MDU2cQFhLgEAAAAAAAAA0QDyRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTA2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDEwNjQwcQFhLgEAAAAAAAAAxnuCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTAyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTUwMjI0cQFhLgEAAAAAAAAAnP/yRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTU5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODk1OTg0cQFhLgEAAAAAAAAADyG2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDgwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA4MDE2cQFhLgEAAAAAAAAArngORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDM3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQzNzkycQFhLgEAAAAAAAAAdQYjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTIzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDEyMzY4cQFhLgEAAAAAAAAAgNDiRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDM3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAzNzkycQFhLgEAAAAAAAAAPVwWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDg4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ4ODQ4cQFhLgEAAAAAAAAAvweWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTgwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTU4MDAwcQFhLgEAAAAAAAAAafDWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MzA5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTMwOTI4cQFhLgEAAAAAAAAAcgqGRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTM4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTUzODQwcQFhLgEAAAAAAAAAHBssRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjQxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY0MTEycQFhLgEAAAAAAAAADDbJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjk1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY5NTUycQFhLgEAAAAAAAAAG4XQRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjI5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYyOTYwcQFhLgEAAAAAAAAAfHDlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTk0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjk5NDcycQFhLgEAAAAAAAAANcNlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjcyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY3MjgwcQFhLgEAAAAAAAAA35qrRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjk5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjY5OTY4cQFhLgEAAAAAAAAAhN0dRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5ODc5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTg3OTUycQFhLgEAAAAAAAAAys7PRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Mzk2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM5NjMycQFhLgEAAAAAAAAAOx4YRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjQyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY0MjI0cQFhLgEAAAAAAAAAYhVpRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzE5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDcxOTg0cQFhLgEAAAAAAAAAqtu6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzg0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc4NDgwcQFhLgEAAAAAAAAAD/JORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODcxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg3MTUycQFhLgEAAAAAAAAAhnNYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTM2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUzNjY0cQFhLgEAAAAAAAAA43ZzRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjA0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTIwNDQ4cQFhLgEAAAAAAAAAmxAWRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjUxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY1MTA0cQFhLgEAAAAAAAAAf2npRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDI1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQyNTEycQFhLgEAAAAAAAAAeG7XRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDQ1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA0NTkycQFhLgEAAAAAAAAAkG8YRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODg5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg4OTQ0cQFhLgEAAAAAAAAAGnMcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODA5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDgwOTEycQFhLgEAAAAAAAAA9uWsRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTQzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE0MzUycQFhLgEAAAAAAAAAkGM0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTgwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk4MDAwcQFhLgEAAAAAAAAAn4O5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzk1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc5NTY4cQFhLgEAAAAAAAAAzWRpRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDk3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ5NzI4cQFhLgEAAAAAAAAANFlQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODY3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg2NzY4cQFhLgEAAAAAAAAAFXWkRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjIwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTYyMDMycQFhLgEAAAAAAAAA5XZMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNDEzNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjQxMzYwcQFhLgEAAAAAAAAAF1hsRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjkwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI5MDcycQFhLgEAAAAAAAAA1Q9ORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTQ1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk0NTQ0cQFhLgEAAAAAAAAAT5J1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzQ2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM0NjQwcQFhLgEAAAAAAAAAQ413Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTYyNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk2MjcycQFhLgEAAAAAAAAAsB90Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDUxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ1MTIwcQFhLgEAAAAAAAAAp1RQRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjA3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzIwNzg0cQFhLgEAAAAAAAAAbtBnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNDU2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTQ1NjgwcQFhLgEAAAAAAAAAUREBRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzc1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM3NTM2cQFhLgEAAAAAAAAADvoyRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjU5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI1OTA0cQFhLgEAAAAAAAAAIfMnRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTc5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE3OTA0cQFhLgEAAAAAAAAA6pEURw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzkxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM5MTUycQFhLgEAAAAAAAAAEbUpRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMzM4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzMzODQwcQFhLgEAAAAAAAAAjlY6Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjEzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYxMzI4cQFhLgEAAAAAAAAAvNg2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTY1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk2NTI4cQFhLgEAAAAAAAAA3fA1Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjU2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY1NjY0cQFhLgEAAAAAAAAAOCjMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzM2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzczNjgwcQFhLgEAAAAAAAAAJeRYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTQzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE0Mzg0cQFhLgEAAAAAAAAAd5dXRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDE5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAxOTM2cQFhLgEAAAAAAAAAjIEoRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDA3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjAwNzUycQFhLgEAAAAAAAAArE4ARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjE0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjYxNDI0cQFhLgEAAAAAAAAAkzggRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTU2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk1NjY0cQFhLgEAAAAAAAAAjXpERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzY3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTc2NzIwcQFhLgEAAAAAAAAAgnqvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTQ1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU0NTEycQFhLgEAAAAAAAAAYH2qRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyODQ2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjg0Njg4cQFhLgEAAAAAAAAAwJAcRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDY4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2ODk2cQFhLgEAAAAAAAAAeMcZRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMwOTg0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMDk4NDY0cQFhLgEAAAAAAAAAa3NhRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNTI1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTUyNTEycQFhLgEAAAAAAAAAQslpRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDM1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQzNTY4cQFhLgEAAAAAAAAAUcZCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTQzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU0MzIwcQFhLgEAAAAAAAAASEKJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTU4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU1ODU2cQFhLgEAAAAAAAAA5iwnRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDY4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA2ODE2cQFhLgEAAAAAAAAAHCUxRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTk4NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE5ODU2cQFhLgEAAAAAAAAAU8gCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjAzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDIwMzM2cQFhLgEAAAAAAAAAFvQ/Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTQyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTU0MjU2cQFhLgEAAAAAAAAAbbc9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODc2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg3NjMycQFhLgEAAAAAAAAAk9NBRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTA4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkwODk2cQFhLgEAAAAAAAAA5VVdRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTA0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTUwNDgwcQFhLgEAAAAAAAAAr3nIRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTUyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU1MjgwcQFhLgEAAAAAAAAAJd0DRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTM1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTkzNTUycQFhLgEAAAAAAAAAaNc9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTE0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjkxNDcycQFhLgEAAAAAAAAAQ1UqRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MjU2MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDI1NjE2cQFhLgEAAAAAAAAAoKK8Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTA3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUwNzY4cQFhLgEAAAAAAAAAvn7ORg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5Mzk5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTM5OTUycQFhLgEAAAAAAAAALb5IRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzgwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc4MDAwcQFhLgEAAAAAAAAAMFJGRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjk1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI5NTUycQFhLgEAAAAAAAAA2cfaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NDg1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDQ4NTYwcQFhLgEAAAAAAAAAO3P/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzUyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc1MjQ4cQFhLgEAAAAAAAAAUQccRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzMxMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDczMTM2cQFhLgEAAAAAAAAAoy3JRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzgyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM4Mjg4cQFhLgEAAAAAAAAAQyVIRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyOTgyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjk4Mjg4cQFhLgEAAAAAAAAAkP/BRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjIwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTYyMDE2cQFhLgEAAAAAAAAApZtIRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMzQ1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjM0NTc2cQFhLgEAAAAAAAAAvqRMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTQ4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU0ODk2cQFhLgEAAAAAAAAABSxqRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzgzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc4Mzg0cQFhLgEAAAAAAAAAPVfTRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDY3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDA2NzY4cQFhLgEAAAAAAAAAfxFWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzI4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTcyODQ4cQFhLgEAAAAAAAAAlMM2Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzY4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTc2ODE2cQFhLgEAAAAAAAAAKENjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTg1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk4NTEycQFhLgEAAAAAAAAA5WK5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDY5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA2OTkycQFhLgEAAAAAAAAAgpaIRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjM1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDYzNTM2cQFhLgEAAAAAAAAA/RgIRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTY3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE2Nzg0cQFhLgEAAAAAAAAAs9NARg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyODUyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjg1MjMycQFhLgEAAAAAAAAA+zccRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTg0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk4NDE2cQFhLgEAAAAAAAAA7svhRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNzcyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjc3MjAwcQFhLgEAAAAAAAAAk+w0Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzUxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc1MTUycQFhLgEAAAAAAAAAtqTuRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NzU1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTc1NTY4cQFhLgEAAAAAAAAAX3GVRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjMyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYzMjQ4cQFhLgEAAAAAAAAAcUYvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMjkwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjI5MDA4cQFhLgEAAAAAAAAAugMiRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5ODQwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTg0MDE2cQFhLgEAAAAAAAAA+ZCkRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzE3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMxNzYwcQFhLgEAAAAAAAAAjuiWRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODE4NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTgxODcycQFhLgEAAAAAAAAATC7iRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDQ3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ0NzM2cQFhLgEAAAAAAAAAVtdCRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDMwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAzMDg4cQFhLgEAAAAAAAAAr8HJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDQ1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA0NTEycQFhLgEAAAAAAAAAiHMdRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjUwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY1MDg4cQFhLgEAAAAAAAAAF4MeRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzY4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc2ODgwcQFhLgEAAAAAAAAA3EoQRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzEwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMxMDg4cQFhLgEAAAAAAAAAU6uaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjkyNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjI5MjY0cQFhLgEAAAAAAAAADuu5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjc0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI3NDU2cQFhLgEAAAAAAAAA27PfRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzQ4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM0ODQ4cQFhLgEAAAAAAAAAGxTDRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzgxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM4MTEycQFhLgEAAAAAAAAAvBe1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDQ3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA0NzIwcQFhLgEAAAAAAAAA9+kORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDQ5MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA0OTEycQFhLgEAAAAAAAAAzbaYRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Nzk2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc5NjY0cQFhLgEAAAAAAAAAhIc3Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Nzk0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc5NDcycQFhLgEAAAAAAAAA+wbjRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTU5NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU1OTUycQFhLgEAAAAAAAAAQ+CRRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDEzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQxMzI4cQFhLgEAAAAAAAAAvgEZRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDQ0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ0NDAwcQFhLgEAAAAAAAAAl3EkRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMzU0NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjM1NDQwcQFhLgEAAAAAAAAAoeUjRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMjY4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTI2ODgwcQFhLgEAAAAAAAAACb+6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzQxNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM0MTYwcQFhLgEAAAAAAAAAQ+yaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4ODMxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODgzMTIwcQFhLgEAAAAAAAAAQGdZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzE1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTMxNTY4cQFhLgEAAAAAAAAAdf0ERw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjY0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjY2NDE2cQFhLgEAAAAAAAAAdGGGRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDc1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA3NTY4cQFhLgEAAAAAAAAAajlkRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5ODk3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTg5Nzc2cQFhLgEAAAAAAAAAffn0Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNjEwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTYxMDQwcQFhLgEAAAAAAAAAx4z7Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMwOTk3MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMDk5NzEycQFhLgEAAAAAAAAA4diaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDM2MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQzNjAwcQFhLgEAAAAAAAAAEHEMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDcyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ3MjQ4cQFhLgEAAAAAAAAAr8EkRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTQ4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk0ODk2cQFhLgEAAAAAAAAAw5unRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODA4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgwODgwcQFhLgEAAAAAAAAAEwPPRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzQyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc0MjU2cQFhLgEAAAAAAAAAY9bhRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMzM2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzMzNjQ4cQFhLgEAAAAAAAAA4IcdRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMzk2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTM5NjQ4cQFhLgEAAAAAAAAANb/WRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzkwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDc5MDg4cQFhLgEAAAAAAAAAPAOBRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTIwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUyMDE2cQFhLgEAAAAAAAAA0zVhRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0ODg1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDg4NTkycQFhLgEAAAAAAAAA+d2hRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTg2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE4NjcycQFhLgEAAAAAAAAAoFG2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTM0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUzNDU2cQFhLgEAAAAAAAAAA5XHRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTE1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTExNTIwcQFhLgEAAAAAAAAAm2OtRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDAzMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQwMzM2cQFhLgEAAAAAAAAAXtr+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjE0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDYxNDI0cQFhLgEAAAAAAAAAxDDgRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTMxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkzMTA0cQFhLgEAAAAAAAAAabigRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNzY1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTc2NTkycQFhLgEAAAAAAAAAuDPlRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxNTUwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTU1MDg4cQFhLgEAAAAAAAAA+pfsRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMDUyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTA1MjgwcQFhLgEAAAAAAAAAtDz+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNDgzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTQ4Mzg0cQFhLgEAAAAAAAAAo0qFRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDQ5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ0OTQ0cQFhLgEAAAAAAAAAU6ViRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjU4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY1ODA4cQFhLgEAAAAAAAAAI0MORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODk0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg5NDI0cQFhLgEAAAAAAAAAv5GKRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NTU3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDU1NzYwcQFhLgEAAAAAAAAAKfCmRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NzMwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDczMDQwcQFhLgEAAAAAAAAAn54aRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzU4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc1ODg4cQFhLgEAAAAAAAAAhrrgRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDAwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAwMDE2cQFhLgEAAAAAAAAAaZM9Rw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDUwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTA1MDA4cQFhLgEAAAAAAAAAWJK6Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjYxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY2MTkycQFhLgEAAAAAAAAArFPMRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0Njg4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY4ODE2cQFhLgEAAAAAAAAAofdBRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMjUwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTI1MDQwcQFhLgEAAAAAAAAAZWP+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzY0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM2NDY0cQFhLgEAAAAAAAAAx7cARw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxNjUyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTY1MjgwcQFhLgEAAAAAAAAA0vx9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxMzk1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTM5NTM2cQFhLgEAAAAAAAAA0v0ORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMzODMxMTU1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMzgzMTE1NTUycQFhLgEAAAAAAAAA/6DARg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4NzM1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODczNTUycQFhLgEAAAAAAAAALR/yRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNjYxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjY2MTI4cQFhLgEAAAAAAAAAMgUvRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDkyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ5MjAwcQFhLgEAAAAAAAAAeDcIRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NTA4MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTUwODMycQFhLgEAAAAAAAAALVymRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5MzY2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTM2NjI0cQFhLgEAAAAAAAAA4xXZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDk1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ5NTUycQFhLgEAAAAAAAAACP6aRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjYzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTY2MzUycQFhLgEAAAAAAAAASqN/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA1MDExNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNTAxMTY4cQFhLgEAAAAAAAAASP+mRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzY5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDM2OTQ0cQFhLgEAAAAAAAAA+LfdRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4ODU2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODg1NjQ4cQFhLgEAAAAAAAAAITkORw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NDU1NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTQ1NTUycQFhLgEAAAAAAAAAfgT1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMTkxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjE5MTg0cQFhLgEAAAAAAAAAt0G1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzQwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc0MDAwcQFhLgEAAAAAAAAADy/KRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTQxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk0MTI4cQFhLgEAAAAAAAAAgKwDRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDk3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA5Nzc2cQFhLgEAAAAAAAAAnZsVRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNjg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTY4ODgwcQFhLgEAAAAAAAAADa1xRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0NjYwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDY2MDMycQFhLgEAAAAAAAAA24wMRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NTI4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTUyODE2cQFhLgEAAAAAAAAACGDiRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTMwNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUzMDcycQFhLgEAAAAAAAAAYGYYRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTQzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTk0Mzg0cQFhLgEAAAAAAAAAZZKeRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODQyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTg0MjQwcQFhLgEAAAAAAAAA7t+cRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTU2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU1Njk2cQFhLgEAAAAAAAAAnn/vRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTkyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDk5MjQ4cQFhLgEAAAAAAAAAmW3vRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMzE3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzMxNzI4cQFhLgEAAAAAAAAAIXrLRQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NjM0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTYzNDcycQFhLgEAAAAAAAAAJWr+Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDcwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ3MDg4cQFhLgEAAAAAAAAAp0FzRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTQ4MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODk0ODMycQFhLgEAAAAAAAAAFqxcRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMzg2NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjM4NjcycQFhLgEAAAAAAAAABHULRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMDY4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzA2ODY0cQFhLgEAAAAAAAAA0BgaRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTMwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDEzMDQwcQFhLgEAAAAAAAAA2I0fRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNzc4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3Mjc3ODQwcQFhLgEAAAAAAAAAE+j5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTEyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDExMjE2cQFhLgEAAAAAAAAA3HvdRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNTU5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjU1OTg0cQFhLgEAAAAAAAAA+ilxRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTkyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODk5MjQ4cQFhLgEAAAAAAAAASsK5Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjg1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI4NTYwcQFhLgEAAAAAAAAATgfJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDAzNzQwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwMzc0MDY0cQFhLgEAAAAAAAAAb0QqRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MTkxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDE5MTg0cQFhLgEAAAAAAAAA1XwJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5MzQyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTM0Mjg4cQFhLgEAAAAAAAAA1/N1Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTcyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjU3MjAwcQFhLgEAAAAAAAAAuaHCRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5NzAyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTcwMjI0cQFhLgEAAAAAAAAAM2kiRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMjE2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjIxNjgwcQFhLgEAAAAAAAAAEKN2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY4OTAwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2ODkwMDY0cQFhLgEAAAAAAAAAhVx9Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5OTM0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTkzNDI0cQFhLgEAAAAAAAAAgAABRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDQ4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQ0ODQ4cQFhLgEAAAAAAAAA1iymRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyOTkwODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjk5MDg4cQFhLgEAAAAAAAAAkpmfRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxODA0OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTgwNDk2cQFhLgEAAAAAAAAAk8LeRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ4OTA5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0ODkwOTkycQFhLgEAAAAAAAAARW+LRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxOTYwNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTk2MDQ4cQFhLgEAAAAAAAAAPy6lRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMTgwMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzE4MDAwcQFhLgEAAAAAAAAAGzYURg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTQ5NDg1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU0OTQ4NTkycQFhLgEAAAAAAAAAUsPPRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NTQ1NjY5MzE3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjU0NTY2OTMxNzI4cQFhLgEAAAAAAAAA04x/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzA4MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTcwODAwcQFhLgEAAAAAAAAAlmcSRw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0MzE0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDMxNDcycQFhLgEAAAAAAAAA2/uERg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjMyMDA0OTAwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzMjAwNDkwMDMycQFhLgEAAAAAAAAAnDR2Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyNTI2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjUyNjg4cQFhLgEAAAAAAAAA/UA/Rg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDAxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQwMTQ0cQFhLgEAAAAAAAAAn1/rRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNjM2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjYzNjY0cQFhLgEAAAAAAAAAnbhJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyNDA3MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjQwNzIwcQFhLgEAAAAAAAAAYFjCRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzNDg2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzQ4NjI0cQFhLgEAAAAAAAAAFanJRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTUwMDI2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU1MDAyNjQwcQFhLgEAAAAAAAAA1FDdRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcyMDk2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MjA5NjgwcQFhLgEAAAAAAAAANXHZRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTcxNzc4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU3MTc3ODA4cQFhLgEAAAAAAAAAZkOuRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMzMjQyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMzI0MjQwcQFhLgEAAAAAAAAAoe2qRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTMyMzkzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTUzMjM5Mzc2cQFhLgEAAAAAAAAAXGOvRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADE2NjM1NTkxODg5NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAxNjYzNTU5MTg4OTc2cQFhLgEAAAAAAAAAu0SPRg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_worker_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "hit_prob": [
+ 0.4584038257598877,
+ 0.4639436602592468,
+ 0.46915966272354126,
+ 0.5015532970428467,
+ 0.5135939121246338,
+ 0.5324704051017761,
+ 0.5445875525474548,
+ 0.5698356032371521,
+ 0.5940684676170349,
+ 0.5997020602226257,
+ 0.6300610899925232,
+ 0.6147475838661194,
+ 0.6306852102279663,
+ 0.6692981719970703,
+ 0.6699439883232117,
+ 0.664452850818634,
+ 0.660709023475647,
+ 0.670006513595581,
+ 0.6819190979003906,
+ 0.6690884232521057,
+ 0.6888436675071716,
+ 0.6607361435890198,
+ 0.6724419593811035,
+ 0.6732617020606995,
+ 0.6848787069320679,
+ 0.6841230392456055,
+ 0.6683732271194458,
+ 0.677583634853363,
+ 0.6931686401367188,
+ 0.6836021542549133,
+ 0.6785181760787964,
+ 0.7231125831604004,
+ 0.7054769396781921,
+ 0.7462831735610962,
+ 0.7338037490844727,
+ 0.7147411108016968,
+ 0.7415851950645447,
+ 0.7463585138320923,
+ 0.7495285272598267,
+ 0.7481348514556885,
+ 0.7733615040779114,
+ 0.7684600949287415,
+ 0.7984684109687805,
+ 0.7818464636802673,
+ 0.7655928134918213,
+ 0.7813166975975037,
+ 0.7793365716934204,
+ 0.79062420129776,
+ 0.8075731992721558,
+ 0.7831603288650513,
+ 0.8030018210411072,
+ 0.8021712899208069,
+ 0.8055548071861267,
+ 0.8013017177581787,
+ 0.7975777983665466,
+ 0.8114674687385559,
+ 0.8026254177093506,
+ 0.8179653286933899,
+ 0.8245471119880676,
+ 0.80816650390625,
+ 0.817903459072113,
+ 0.82330322265625,
+ 0.8266700506210327,
+ 0.8293140530586243,
+ 0.8221036195755005,
+ 0.8050059080123901,
+ 0.8353500962257385,
+ 0.8334366083145142,
+ 0.8417999148368835,
+ 0.8423640727996826,
+ 0.8303216695785522,
+ 0.8378893733024597,
+ 0.8506234288215637,
+ 0.844221830368042,
+ 0.8321515917778015,
+ 0.8450466990470886,
+ 0.8524292707443237,
+ 0.8579850196838379,
+ 0.8422502279281616,
+ 0.8524646759033203,
+ 0.8562067151069641,
+ 0.8596192002296448,
+ 0.8616262078285217,
+ 0.840459942817688,
+ 0.8552064895629883,
+ 0.8392099738121033,
+ 0.8592987060546875,
+ 0.8403043746948242,
+ 0.8464533686637878,
+ 0.8681471347808838,
+ 0.8672482371330261,
+ 0.8583268523216248,
+ 0.8489933013916016,
+ 0.85127854347229,
+ 0.8548262119293213,
+ 0.849933922290802,
+ 0.8559421896934509,
+ 0.8541879057884216,
+ 0.8625016808509827,
+ 0.8455280661582947,
+ 0.8557416200637817,
+ 0.8608731031417847,
+ 0.8707684278488159,
+ 0.8673667907714844,
+ 0.8687096834182739,
+ 0.8498526811599731,
+ 0.8617377281188965,
+ 0.8451045751571655,
+ 0.868503212928772,
+ 0.8520931601524353,
+ 0.8517776131629944,
+ 0.8610169887542725,
+ 0.858271598815918,
+ 0.8668175339698792,
+ 0.8706337809562683,
+ 0.8585788011550903,
+ 0.8548935651779175,
+ 0.8581443428993225,
+ 0.8604920506477356,
+ 0.8589937686920166,
+ 0.855873167514801,
+ 0.8684812188148499,
+ 0.8641429543495178,
+ 0.8731385469436646,
+ 0.8694756627082825,
+ 0.8807516694068909,
+ 0.8827956914901733,
+ 0.8725905418395996,
+ 0.8695301413536072,
+ 0.8698732852935791,
+ 0.8731276988983154,
+ 0.874636709690094,
+ 0.8774072527885437,
+ 0.8691304326057434,
+ 0.8688708543777466,
+ 0.8588985204696655,
+ 0.87636798620224,
+ 0.8821497559547424,
+ 0.8774193525314331,
+ 0.8695863485336304,
+ 0.8744996190071106,
+ 0.8787253499031067,
+ 0.8648188710212708,
+ 0.8710787892341614,
+ 0.8738561272621155,
+ 0.8728199601173401,
+ 0.8858339190483093,
+ 0.8835268616676331,
+ 0.874855101108551,
+ 0.8680466413497925,
+ 0.8758010268211365,
+ 0.865972101688385,
+ 0.8809434771537781,
+ 0.8717899918556213,
+ 0.889549732208252,
+ 0.8709385395050049,
+ 0.878284752368927,
+ 0.8847401142120361,
+ 0.8726630806922913,
+ 0.8820101022720337,
+ 0.8935883641242981,
+ 0.878822922706604,
+ 0.8758828639984131,
+ 0.8866919875144958,
+ 0.8848093152046204,
+ 0.8871808648109436,
+ 0.8752456903457642,
+ 0.8849489688873291,
+ 0.870940089225769,
+ 0.8809056878089905,
+ 0.877763569355011,
+ 0.8919132947921753,
+ 0.883283257484436,
+ 0.8732494115829468,
+ 0.8945837616920471,
+ 0.8835439682006836,
+ 0.8895148634910583,
+ 0.8839780688285828,
+ 0.8945630192756653,
+ 0.878862738609314,
+ 0.8771350979804993,
+ 0.8812299370765686,
+ 0.8931791186332703,
+ 0.894533097743988,
+ 0.8558513522148132,
+ 0.8803889155387878,
+ 0.857686460018158,
+ 0.8883776068687439,
+ 0.8780014514923096,
+ 0.8854272365570068,
+ 0.8706173300743103,
+ 0.8913155794143677,
+ 0.8753165602684021,
+ 0.892526388168335,
+ 0.8767524361610413,
+ 0.880550742149353,
+ 0.8766257762908936,
+ 0.8831791877746582,
+ 0.8937135338783264,
+ 0.8823024034500122,
+ 0.8951568007469177,
+ 0.8809869289398193,
+ 0.90183025598526,
+ 0.8836292028427124,
+ 0.8886584043502808,
+ 0.8885327577590942,
+ 0.8783662915229797,
+ 0.8800895810127258,
+ 0.880912721157074,
+ 0.8788893222808838,
+ 0.8726046085357666,
+ 0.8833683133125305,
+ 0.8792110085487366,
+ 0.8842160701751709,
+ 0.87309730052948,
+ 0.882496178150177,
+ 0.877069890499115,
+ 0.8889874815940857,
+ 0.886750340461731,
+ 0.8862553238868713,
+ 0.8806199431419373,
+ 0.8844752907752991,
+ 0.8622111678123474,
+ 0.8651368021965027,
+ 0.8664266467094421,
+ 0.8867178559303284,
+ 0.8758424520492554,
+ 0.8702552318572998,
+ 0.8798325061798096,
+ 0.8633228540420532,
+ 0.8791248798370361,
+ 0.8810040354728699,
+ 0.8848548531532288,
+ 0.8889737129211426,
+ 0.8876430988311768,
+ 0.8854694366455078,
+ 0.8859447836875916,
+ 0.8769290447235107,
+ 0.8828229308128357,
+ 0.8799382448196411,
+ 0.8801794648170471,
+ 0.883745014667511,
+ 0.8943211436271667,
+ 0.8858838677406311,
+ 0.8952729105949402,
+ 0.8961161375045776,
+ 0.8787282705307007,
+ 0.8919232487678528,
+ 0.8917691111564636,
+ 0.8961071968078613,
+ 0.8808997273445129,
+ 0.9035604596138,
+ 0.8966801762580872,
+ 0.8903999328613281,
+ 0.8866727948188782,
+ 0.883159339427948,
+ 0.8859160542488098,
+ 0.8893243074417114,
+ 0.8910515904426575,
+ 0.882603645324707,
+ 0.8902862668037415,
+ 0.8924959897994995,
+ 0.8914431929588318,
+ 0.8972822427749634,
+ 0.880599319934845,
+ 0.8861500024795532,
+ 0.8945665955543518,
+ 0.9003252983093262,
+ 0.8892542123794556,
+ 0.8742769360542297,
+ 0.8900261521339417,
+ 0.8918425440788269,
+ 0.8949355483055115,
+ 0.8832088708877563,
+ 0.8903234004974365,
+ 0.8939641714096069,
+ 0.8971104621887207,
+ 0.8885340690612793,
+ 0.8978492021560669,
+ 0.8892084360122681,
+ 0.8890619874000549,
+ 0.8872904181480408,
+ 0.8888042569160461,
+ 0.8919773697853088,
+ 0.8976510167121887,
+ 0.8975772261619568,
+ 0.9055136442184448,
+ 0.9014310240745544,
+ 0.893268883228302,
+ 0.8897476196289062,
+ 0.9026486873626709,
+ 0.8930268287658691,
+ 0.8954458832740784,
+ 0.8969051241874695,
+ 0.8999742865562439,
+ 0.8919302821159363,
+ 0.9006568193435669,
+ 0.8968884944915771,
+ 0.8929685354232788,
+ 0.8886905312538147,
+ 0.8927366137504578,
+ 0.8935167789459229,
+ 0.9010220170021057,
+ 0.8860769867897034,
+ 0.88214510679245,
+ 0.8885025978088379,
+ 0.8908466696739197,
+ 0.8985893726348877,
+ 0.8878878355026245,
+ 0.8975057005882263,
+ 0.8825921416282654,
+ 0.8877868056297302,
+ 0.8902691006660461,
+ 0.8916551470756531,
+ 0.8953701853752136,
+ 0.8924089670181274,
+ 0.8934911489486694,
+ 0.8870519399642944,
+ 0.888038158416748,
+ 0.8829606771469116,
+ 0.8751853108406067,
+ 0.875048041343689,
+ 0.8871800899505615,
+ 0.8894108533859253,
+ 0.8939335346221924,
+ 0.8941611051559448,
+ 0.8809176087379456,
+ 0.894284188747406,
+ 0.880020022392273,
+ 0.8969244360923767,
+ 0.8882916569709778,
+ 0.8908494710922241,
+ 0.8812298774719238,
+ 0.8896986246109009,
+ 0.8922185301780701,
+ 0.888234555721283,
+ 0.8871017694473267,
+ 0.8973533511161804,
+ 0.8697912693023682,
+ 0.89312744140625,
+ 0.8869518637657166,
+ 0.8954034447669983,
+ 0.895953893661499,
+ 0.8929954767227173,
+ 0.896898090839386,
+ 0.8896796107292175,
+ 0.8954615592956543,
+ 0.8946042060852051,
+ 0.8962644934654236,
+ 0.8925577402114868,
+ 0.8959282040596008,
+ 0.8894826769828796,
+ 0.8864633440971375,
+ 0.8986421227455139,
+ 0.8924940228462219,
+ 0.8874114155769348,
+ 0.8844438791275024,
+ 0.8942860960960388,
+ 0.8870683312416077,
+ 0.8995529413223267,
+ 0.8956544995307922,
+ 0.8979491591453552,
+ 0.8899986743927002,
+ 0.8889639973640442,
+ 0.9004926085472107,
+ 0.8972247838973999,
+ 0.8932197093963623,
+ 0.8912262320518494,
+ 0.9012744426727295,
+ 0.8978501558303833,
+ 0.9025203585624695,
+ 0.8957076072692871,
+ 0.9080156683921814,
+ 0.9056081175804138,
+ 0.8917101621627808,
+ 0.8969822525978088,
+ 0.9011585712432861,
+ 0.9028189182281494,
+ 0.8997918963432312,
+ 0.8885385394096375,
+ 0.8731441497802734,
+ 0.8884177207946777,
+ 0.8708909153938293,
+ 0.8785482048988342,
+ 0.894039511680603,
+ 0.8946731686592102,
+ 0.8987613916397095,
+ 0.9009314775466919,
+ 0.8970645070075989,
+ 0.8959252238273621,
+ 0.8929049372673035,
+ 0.8882966041564941,
+ 0.8989822268486023,
+ 0.8884996175765991,
+ 0.87778639793396,
+ 0.8869885802268982,
+ 0.890985906124115,
+ 0.8901368379592896,
+ 0.8904991149902344,
+ 0.8802978992462158
+ ],
+ "hit_prob_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "manager_advantage": [
+ -0.02663896232843399,
+ -0.2572029232978821,
+ -0.3160575330257416,
+ -0.3589768409729004,
+ -0.3925330340862274,
+ -0.4276769757270813,
+ -0.46622902154922485,
+ -0.49740758538246155,
+ -0.5263801217079163,
+ -0.5695216655731201,
+ -0.5865886211395264,
+ -0.6254826188087463,
+ -0.667134165763855,
+ -0.680033802986145,
+ -0.7083505988121033,
+ -0.7382078170776367,
+ -0.7771002650260925,
+ -0.819459855556488,
+ -0.81380695104599,
+ -0.8563333749771118,
+ -0.8826944231987,
+ -0.9237335920333862,
+ -0.9398747086524963,
+ -0.9796950817108154,
+ -1.0072346925735474,
+ -1.0562115907669067,
+ -1.079664945602417,
+ -1.1037606000900269,
+ -1.1299134492874146,
+ -1.151520013809204,
+ -1.196596384048462,
+ -1.217590570449829,
+ -1.2608706951141357,
+ -1.2657084465026855,
+ -1.2967222929000854,
+ -1.348082184791565,
+ -1.3615950345993042,
+ -1.4022541046142578,
+ -1.4215037822723389,
+ -1.46620512008667,
+ -1.4906938076019287,
+ -1.5159780979156494,
+ -1.5491546392440796,
+ -1.5833616256713867,
+ -1.6115306615829468,
+ -1.6487278938293457,
+ -1.6862542629241943,
+ -1.6965078115463257,
+ -1.7403104305267334,
+ -1.7802734375,
+ -1.8065814971923828,
+ -1.8401408195495605,
+ -1.861684799194336,
+ -1.9116312265396118,
+ -1.9490106105804443,
+ -1.9901500940322876,
+ -2.0210635662078857,
+ -2.0568301677703857,
+ -2.1130738258361816,
+ -2.1293232440948486,
+ -2.1992931365966797,
+ -2.2236013412475586,
+ -2.2283265590667725,
+ -2.2365212440490723,
+ -2.251364231109619,
+ -2.303544282913208,
+ -2.328650951385498,
+ -2.338625192642212,
+ -2.3672690391540527,
+ -2.407958507537842,
+ -2.465277671813965,
+ -2.4935595989227295,
+ -2.5210540294647217,
+ -2.5611369609832764,
+ -2.582350492477417,
+ -2.5896568298339844,
+ -2.616164445877075,
+ -2.694967746734619,
+ -2.7611336708068848,
+ -2.7586424350738525,
+ -2.739248037338257,
+ -2.874786376953125,
+ -2.837709903717041,
+ -2.839301347732544,
+ -2.9107344150543213,
+ -2.8783843517303467,
+ -2.9587619304656982,
+ -2.9630491733551025,
+ -2.9994966983795166,
+ -3.0323166847229004,
+ -3.1081254482269287,
+ -3.0649845600128174,
+ -3.1526780128479004,
+ -3.0656087398529053,
+ -3.1003639698028564,
+ -3.167792558670044,
+ -3.194199800491333,
+ -3.170417070388794,
+ -3.2182180881500244,
+ -3.286595582962036,
+ -3.253046989440918,
+ -3.283027172088623,
+ -3.3277976512908936,
+ -3.2859718799591064,
+ -3.3099896907806396,
+ -3.3648436069488525,
+ -3.453742742538452,
+ -3.3442952632904053,
+ -3.440768003463745,
+ -3.4055874347686768,
+ -3.456258773803711,
+ -3.488309860229492,
+ -3.488774061203003,
+ -3.454360008239746,
+ -3.5034401416778564,
+ -3.574692964553833,
+ -3.5319430828094482,
+ -3.6198387145996094,
+ -3.6077709197998047,
+ -3.635171890258789,
+ -3.6078150272369385,
+ -3.6461851596832275,
+ -3.666093587875366,
+ -3.6448373794555664,
+ -3.6459691524505615,
+ -3.6877198219299316,
+ -3.736276626586914,
+ -3.7382326126098633,
+ -3.7354815006256104,
+ -3.7762250900268555,
+ -3.7414212226867676,
+ -3.7032105922698975,
+ -3.725712776184082,
+ -3.8365254402160645,
+ -3.7751739025115967,
+ -3.7946979999542236,
+ -3.787660837173462,
+ -3.822112798690796,
+ -3.8109042644500732,
+ -3.8331146240234375,
+ -3.8521437644958496,
+ -3.822251796722412,
+ -3.8772804737091064,
+ -3.870500326156616,
+ -3.8677215576171875,
+ -3.989267349243164,
+ -3.9670557975769043,
+ -4.018924713134766,
+ -4.045201778411865,
+ -3.9821934700012207,
+ -4.109235763549805,
+ -4.110668182373047,
+ -4.1789374351501465,
+ -4.1662492752075195,
+ -4.199103832244873,
+ -4.244976997375488,
+ -4.250110626220703,
+ -4.264821529388428,
+ -4.351170539855957,
+ -4.364428997039795,
+ -4.461562633514404,
+ -4.41312837600708,
+ -4.433082580566406,
+ -4.456157684326172,
+ -4.504942893981934,
+ -4.61372184753418,
+ -4.57578706741333,
+ -4.57204532623291,
+ -4.61514139175415,
+ -4.657320499420166,
+ -4.6603522300720215,
+ -4.74601411819458,
+ -4.713131904602051,
+ -4.699613571166992,
+ -4.730753421783447,
+ -4.7651848793029785,
+ -4.773901462554932,
+ -4.874427318572998,
+ -4.796988487243652,
+ -4.873668670654297,
+ -4.859199523925781,
+ -4.957581043243408,
+ -4.977205276489258,
+ -4.91019344329834,
+ -4.925057411193848,
+ -4.928022861480713,
+ -4.958485126495361,
+ -4.979419231414795,
+ -5.0652546882629395,
+ -5.028635025024414,
+ -5.036294937133789,
+ -5.067955493927002,
+ -5.132736682891846,
+ -5.126091480255127,
+ -5.150761604309082,
+ -5.1535234451293945,
+ -5.172701835632324,
+ -5.165377616882324,
+ -5.210761070251465,
+ -5.198371410369873,
+ -5.304426193237305,
+ -5.301833152770996,
+ -5.292501926422119,
+ -5.284456253051758,
+ -5.375486373901367,
+ -5.417087078094482,
+ -5.364228248596191,
+ -5.436864852905273,
+ -5.447391033172607,
+ -5.3874592781066895,
+ -5.411096096038818,
+ -5.457617282867432,
+ -5.498485565185547,
+ -5.45281982421875,
+ -5.4777116775512695,
+ -5.506178379058838,
+ -5.506708145141602,
+ -5.554549217224121,
+ -5.578658103942871,
+ -5.584029197692871,
+ -5.584778308868408,
+ -5.602518558502197,
+ -5.644182205200195,
+ -5.703494071960449,
+ -5.694747447967529,
+ -5.7513909339904785,
+ -5.717569828033447,
+ -5.711701393127441,
+ -5.743716716766357,
+ -5.773974418640137,
+ -5.776387691497803,
+ -5.8368611335754395,
+ -5.835846900939941,
+ -5.848568439483643,
+ -5.875497341156006,
+ -5.8594865798950195,
+ -5.886791229248047,
+ -5.9018635749816895,
+ -5.976096153259277,
+ -5.957978248596191,
+ -5.976349353790283,
+ -5.972891807556152,
+ -6.052141189575195,
+ -5.963481426239014,
+ -6.047531604766846,
+ -6.032852649688721,
+ -6.100057601928711,
+ -6.070554256439209,
+ -6.158865451812744,
+ -6.146196365356445,
+ -6.198877334594727,
+ -6.145649433135986,
+ -6.175655364990234,
+ -6.198782444000244,
+ -6.1713480949401855,
+ -6.235501289367676,
+ -6.269243240356445,
+ -6.260392189025879,
+ -6.308013439178467,
+ -6.257373332977295,
+ -6.254857540130615,
+ -6.320016860961914,
+ -6.32995080947876,
+ -6.34188175201416,
+ -6.317267417907715,
+ -6.368008136749268,
+ -6.411313056945801,
+ -6.443546772003174,
+ -6.4348273277282715,
+ -6.396727561950684,
+ -6.453780651092529,
+ -6.435396194458008,
+ -6.516517162322998,
+ -6.536193370819092,
+ -6.506653785705566,
+ -6.516372203826904,
+ -6.5315961837768555,
+ -6.566682815551758,
+ -6.599145889282227,
+ -6.591296672821045,
+ -6.601822853088379,
+ -6.667782783508301,
+ -6.6276984214782715,
+ -6.730141639709473,
+ -6.698603630065918,
+ -6.720579624176025,
+ -6.713376045227051,
+ -6.744507312774658,
+ -6.757623672485352,
+ -6.719842910766602,
+ -6.769012451171875,
+ -6.731733798980713,
+ -6.729036808013916,
+ -6.718231678009033,
+ -6.789065361022949,
+ -6.747708797454834,
+ -6.771276950836182,
+ -6.869157791137695,
+ -6.835748672485352,
+ -6.784924030303955,
+ -6.820185661315918,
+ -6.872776985168457,
+ -6.827708721160889,
+ -6.804811954498291,
+ -6.832693099975586,
+ -6.895574569702148,
+ -6.8932647705078125,
+ -6.867786407470703,
+ -6.9491448402404785,
+ -6.907809257507324,
+ -6.893585681915283,
+ -6.879058361053467,
+ -6.861417293548584,
+ -6.913512706756592,
+ -6.925076007843018,
+ -6.909507751464844,
+ -6.922842502593994,
+ -6.953697681427002,
+ -6.910955905914307,
+ -6.972376346588135,
+ -6.978240966796875,
+ -7.032483100891113,
+ -7.03015661239624,
+ -7.028024673461914,
+ -6.974826812744141,
+ -6.997699737548828,
+ -7.00825309753418,
+ -7.037531852722168,
+ -7.051389694213867,
+ -7.108765602111816,
+ -7.087390899658203,
+ -7.063278675079346,
+ -7.11239767074585,
+ -7.1582112312316895,
+ -7.089607238769531,
+ -7.110701560974121,
+ -7.183657169342041,
+ -7.229252338409424,
+ -7.172665596008301,
+ -7.21010160446167,
+ -7.263236045837402,
+ -7.2993059158325195,
+ -7.2969770431518555,
+ -7.256763935089111,
+ -7.277787685394287,
+ -7.3037238121032715,
+ -7.294057369232178,
+ -7.291290283203125,
+ -7.382502555847168,
+ -7.334388256072998,
+ -7.329132556915283,
+ -7.323853492736816,
+ -7.358192443847656,
+ -7.400827407836914,
+ -7.371307373046875,
+ -7.396085262298584,
+ -7.457416534423828,
+ -7.400442600250244,
+ -7.4437761306762695,
+ -7.541692733764648,
+ -7.468096733093262,
+ -7.485698223114014,
+ -7.502988338470459,
+ -7.509515285491943,
+ -7.547822952270508,
+ -7.62152099609375,
+ -7.587212085723877,
+ -7.58518648147583,
+ -7.5835089683532715,
+ -7.63592529296875,
+ -7.610189914703369,
+ -7.630623817443848,
+ -7.634887218475342,
+ -7.680578231811523,
+ -7.670523643493652,
+ -7.645967960357666,
+ -7.716920375823975,
+ -7.668059825897217,
+ -7.6560869216918945,
+ -7.6885457038879395,
+ -7.80944299697876,
+ -7.785663604736328,
+ -7.757388591766357,
+ -7.767238140106201,
+ -7.768413066864014,
+ -7.727843284606934,
+ -7.665641784667969,
+ -7.749630928039551,
+ -7.709927558898926,
+ -7.78874397277832,
+ -7.756791591644287,
+ -7.760394096374512,
+ -7.7735490798950195,
+ -7.769827365875244,
+ -7.813776969909668,
+ -7.825157165527344,
+ -7.858746528625488,
+ -7.797523498535156,
+ -7.827890872955322,
+ -7.733953952789307
+ ],
+ "manager_advantage_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "manager_cos_sim": [
+ 0.0013999426737427711,
+ -0.06089382618665695,
+ -0.07936447113752365,
+ -0.08471515029668808,
+ -0.07500360906124115,
+ -0.097037672996521,
+ -0.07922667264938354,
+ -0.08094844967126846,
+ -0.08515249937772751,
+ -0.09300190955400467,
+ -0.08136782795190811,
+ -0.08924935758113861,
+ -0.09708844870328903,
+ -0.08538547158241272,
+ -0.09384705126285553,
+ -0.07791787385940552,
+ -0.09998811781406403,
+ -0.11235105246305466,
+ -0.07748251408338547,
+ -0.08281132578849792,
+ -0.0730704739689827,
+ -0.10586615651845932,
+ -0.08346586674451828,
+ -0.08539330959320068,
+ -0.09806639701128006,
+ -0.10889947414398193,
+ -0.10338594019412994,
+ -0.10574039071798325,
+ -0.0914807915687561,
+ -0.08639836311340332,
+ -0.10557430982589722,
+ -0.09885096549987793,
+ -0.11643815785646439,
+ -0.09179851412773132,
+ -0.07964010536670685,
+ -0.12559638917446136,
+ -0.08502417057752609,
+ -0.11668612062931061,
+ -0.09153150022029877,
+ -0.11762498319149017,
+ -0.08879366517066956,
+ -0.0923827663064003,
+ -0.10095604509115219,
+ -0.10738688707351685,
+ -0.11440683901309967,
+ -0.1069902777671814,
+ -0.11533714830875397,
+ -0.09450805932283401,
+ -0.1026809811592102,
+ -0.1252119392156601,
+ -0.11810905486345291,
+ -0.14928990602493286,
+ -0.1203932911157608,
+ -0.12141886353492737,
+ -0.09295306354761124,
+ -0.10260079801082611,
+ -0.11972078680992126,
+ -0.12405046075582504,
+ -0.11963300406932831,
+ -0.12230851501226425,
+ -0.09744994342327118,
+ -0.10232318192720413,
+ -0.11447165161371231,
+ -0.12998130917549133,
+ -0.14767882227897644,
+ -0.12152700126171112,
+ -0.11474104225635529,
+ -0.12440599501132965,
+ -0.0985938310623169,
+ -0.12628935277462006,
+ -0.11556994915008545,
+ -0.09781160950660706,
+ -0.1225469782948494,
+ -0.1254964917898178,
+ -0.12690262496471405,
+ -0.12625300884246826,
+ -0.12845006585121155,
+ -0.12051187455654144,
+ -0.1258319765329361,
+ -0.12794937193393707,
+ -0.09988006204366684,
+ -0.11768180131912231,
+ -0.1321483999490738,
+ -0.11946620792150497,
+ -0.13976210355758667,
+ -0.1332569420337677,
+ -0.12243624776601791,
+ -0.12676727771759033,
+ -0.13499093055725098,
+ -0.15286603569984436,
+ -0.1400037705898285,
+ -0.1247718334197998,
+ -0.13486291468143463,
+ -0.10036264359951019,
+ -0.13279660046100616,
+ -0.11464951932430267,
+ -0.1427542120218277,
+ -0.1386754959821701,
+ -0.12685386836528778,
+ -0.1369033008813858,
+ -0.13929614424705505,
+ -0.1510002613067627,
+ -0.15304064750671387,
+ -0.1402267962694168,
+ -0.13472311198711395,
+ -0.11181573569774628,
+ -0.12507176399230957,
+ -0.13696914911270142,
+ -0.17100313305854797,
+ -0.1536780595779419,
+ -0.117757149040699,
+ -0.14761754870414734,
+ -0.14473828673362732,
+ -0.12575697898864746,
+ -0.14652074873447418,
+ -0.12968701124191284,
+ -0.12723132967948914,
+ -0.1574835479259491,
+ -0.14264965057373047,
+ -0.15251126885414124,
+ -0.14999344944953918,
+ -0.15779165923595428,
+ -0.13600097596645355,
+ -0.13922901451587677,
+ -0.13581521809101105,
+ -0.17948302626609802,
+ -0.16057142615318298,
+ -0.17534580826759338,
+ -0.16071081161499023,
+ -0.1720217764377594,
+ -0.12904788553714752,
+ -0.13148453831672668,
+ -0.14580994844436646,
+ -0.19100476801395416,
+ -0.13595667481422424,
+ -0.13742506504058838,
+ -0.15645639598369598,
+ -0.17715494334697723,
+ -0.13915181159973145,
+ -0.17168793082237244,
+ -0.13113513588905334,
+ -0.1360006034374237,
+ -0.15848778188228607,
+ -0.16631624102592468,
+ -0.12957465648651123,
+ -0.1792609840631485,
+ -0.12090274691581726,
+ -0.1933591514825821,
+ -0.13891054689884186,
+ -0.1403803825378418,
+ -0.16964268684387207,
+ -0.1462811827659607,
+ -0.17066963016986847,
+ -0.16421066224575043,
+ -0.1521696150302887,
+ -0.15149450302124023,
+ -0.13390116393566132,
+ -0.13456261157989502,
+ -0.1802234947681427,
+ -0.1228814572095871,
+ -0.17287974059581757,
+ -0.14038428664207458,
+ -0.14694014191627502,
+ -0.13941794633865356,
+ -0.1489149034023285,
+ -0.1673930436372757,
+ -0.154544398188591,
+ -0.1709645837545395,
+ -0.1611533761024475,
+ -0.16177265346050262,
+ -0.145545095205307,
+ -0.1798071414232254,
+ -0.14881128072738647,
+ -0.15588092803955078,
+ -0.17142553627490997,
+ -0.11023210734128952,
+ -0.15719738602638245,
+ -0.16465899348258972,
+ -0.12069100141525269,
+ -0.1650637537240982,
+ -0.1265423744916916,
+ -0.16129519045352936,
+ -0.16759181022644043,
+ -0.11219095438718796,
+ -0.14840127527713776,
+ -0.168654203414917,
+ -0.13084298372268677,
+ -0.17838288843631744,
+ -0.14980866014957428,
+ -0.14405299723148346,
+ -0.1605890691280365,
+ -0.15121762454509735,
+ -0.171775221824646,
+ -0.16217102110385895,
+ -0.16920512914657593,
+ -0.17635942995548248,
+ -0.1546727865934372,
+ -0.13463862240314484,
+ -0.12956906855106354,
+ -0.16166239976882935,
+ -0.16359466314315796,
+ -0.15399517118930817,
+ -0.17189906537532806,
+ -0.16124728322029114,
+ -0.18721666932106018,
+ -0.1728546917438507,
+ -0.1416850984096527,
+ -0.15470638871192932,
+ -0.14106491208076477,
+ -0.14693644642829895,
+ -0.1511813998222351,
+ -0.1637134701013565,
+ -0.13903579115867615,
+ -0.16072392463684082,
+ -0.1492268592119217,
+ -0.15027734637260437,
+ -0.1445811688899994,
+ -0.16147753596305847,
+ -0.15175095200538635,
+ -0.15235379338264465,
+ -0.1509360522031784,
+ -0.12830397486686707,
+ -0.15057522058486938,
+ -0.15306973457336426,
+ -0.14293436706066132,
+ -0.1790410280227661,
+ -0.18536873161792755,
+ -0.1564374715089798,
+ -0.13938041031360626,
+ -0.15196523070335388,
+ -0.16497144103050232,
+ -0.1575169712305069,
+ -0.13661988079547882,
+ -0.1409750133752823,
+ -0.1703185886144638,
+ -0.17436306178569794,
+ -0.14193715155124664,
+ -0.18768355250358582,
+ -0.14141516387462616,
+ -0.13336598873138428,
+ -0.1801472306251526,
+ -0.1393563449382782,
+ -0.16698619723320007,
+ -0.15435615181922913,
+ -0.21064230799674988,
+ -0.16875313222408295,
+ -0.14781910181045532,
+ -0.1651083528995514,
+ -0.18486177921295166,
+ -0.15172958374023438,
+ -0.16736473143100739,
+ -0.16795116662979126,
+ -0.14495807886123657,
+ -0.15543317794799805,
+ -0.18848484754562378,
+ -0.15317225456237793,
+ -0.19696597754955292,
+ -0.1397564858198166,
+ -0.1672094315290451,
+ -0.18571507930755615,
+ -0.1743926703929901,
+ -0.1872420608997345,
+ -0.1584528386592865,
+ -0.15300709009170532,
+ -0.1836901605129242,
+ -0.15885108709335327,
+ -0.13682210445404053,
+ -0.16959017515182495,
+ -0.13610345125198364,
+ -0.17611168324947357,
+ -0.16875426471233368,
+ -0.13897104561328888,
+ -0.14138875901699066,
+ -0.14363491535186768,
+ -0.15203942358493805,
+ -0.15699471533298492,
+ -0.1491250991821289,
+ -0.17340879142284393,
+ -0.13569985330104828,
+ -0.17317412793636322,
+ -0.15943357348442078,
+ -0.14459286630153656,
+ -0.14876499772071838,
+ -0.16744938492774963,
+ -0.1714293658733368,
+ -0.15584594011306763,
+ -0.202422633767128,
+ -0.14477691054344177,
+ -0.1826821267604828,
+ -0.16252434253692627,
+ -0.16383270919322968,
+ -0.1365833282470703,
+ -0.12355146557092667,
+ -0.1771114021539688,
+ -0.1451554298400879,
+ -0.16385985910892487,
+ -0.19133420288562775,
+ -0.19420349597930908,
+ -0.14946630597114563,
+ -0.1584477424621582,
+ -0.19430674612522125,
+ -0.15338581800460815,
+ -0.16797935962677002,
+ -0.15683729946613312,
+ -0.1583394706249237,
+ -0.1819034218788147,
+ -0.15180723369121552,
+ -0.15631382167339325,
+ -0.18879325687885284,
+ -0.15395787358283997,
+ -0.13617651164531708,
+ -0.1624879091978073,
+ -0.13891173899173737,
+ -0.17976829409599304,
+ -0.15283122658729553,
+ -0.16170504689216614,
+ -0.15734826028347015,
+ -0.13847015798091888,
+ -0.16008512675762177,
+ -0.14382719993591309,
+ -0.1763916164636612,
+ -0.1602855771780014,
+ -0.20578841865062714,
+ -0.17615169286727905,
+ -0.17688807845115662,
+ -0.1524146944284439,
+ -0.15956954658031464,
+ -0.1512865573167801,
+ -0.1394628882408142,
+ -0.1679406613111496,
+ -0.15641820430755615,
+ -0.16400398313999176,
+ -0.14440952241420746,
+ -0.16580267250537872,
+ -0.14263761043548584,
+ -0.1104464903473854,
+ -0.1578480750322342,
+ -0.18714609742164612,
+ -0.16389819979667664,
+ -0.1617390364408493,
+ -0.18999920785427094,
+ -0.18069754540920258,
+ -0.1574385017156601,
+ -0.16080868244171143,
+ -0.16739186644554138,
+ -0.18526318669319153,
+ -0.1503898799419403,
+ -0.14810171723365784,
+ -0.17790502309799194,
+ -0.1550786942243576,
+ -0.1573464721441269,
+ -0.14236758649349213,
+ -0.1926742047071457,
+ -0.1595357358455658,
+ -0.15790773928165436,
+ -0.16982778906822205,
+ -0.16535532474517822,
+ -0.15618468821048737,
+ -0.1678353250026703,
+ -0.18292924761772156,
+ -0.16411735117435455,
+ -0.16327659785747528,
+ -0.1551971584558487,
+ -0.16080091893672943,
+ -0.15035004913806915,
+ -0.18276241421699524,
+ -0.16009773313999176,
+ -0.1790349781513214,
+ -0.16502436995506287,
+ -0.17827177047729492,
+ -0.1607906073331833,
+ -0.1495417058467865,
+ -0.17446693778038025,
+ -0.19355933368206024,
+ -0.1553611010313034,
+ -0.15411733090877533,
+ -0.2030193954706192,
+ -0.1776150017976761,
+ -0.15681950747966766,
+ -0.18858681619167328,
+ -0.19558225572109222,
+ -0.20153744518756866,
+ -0.16819266974925995,
+ -0.17968714237213135,
+ -0.18923501670360565,
+ -0.17079053819179535,
+ -0.15620066225528717,
+ -0.1698608696460724,
+ -0.1540473848581314,
+ -0.1747443825006485,
+ -0.17790266871452332,
+ -0.17821960151195526,
+ -0.17125196754932404,
+ -0.18173237144947052,
+ -0.19775895774364471,
+ -0.16846297681331635,
+ -0.2030971348285675,
+ -0.13532480597496033,
+ -0.17114154994487762,
+ -0.12587085366249084
+ ],
+ "manager_cos_sim_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "manager_loss": [
+ 3.0766162872314453,
+ -1175.2734375,
+ -1768.131591796875,
+ -2216.083984375,
+ -2651.2783203125,
+ -2937.458984375,
+ -3469.669921875,
+ -3744.744384765625,
+ -4038.93115234375,
+ -4600.76220703125,
+ -4844.17431640625,
+ -5198.88916015625,
+ -5537.1484375,
+ -5875.46630859375,
+ -6083.3759765625,
+ -6528.75927734375,
+ -6957.37646484375,
+ -7218.01611328125,
+ -7553.84912109375,
+ -7853.47412109375,
+ -8429.71875,
+ -8522.8984375,
+ -8908.8515625,
+ -9398.5380859375,
+ -9289.486328125,
+ -10060.71875,
+ -10111.802734375,
+ -10394.3037109375,
+ -10977.92578125,
+ -11089.646484375,
+ -11167.65234375,
+ -11905.0634765625,
+ -12146.9130859375,
+ -12446.8583984375,
+ -13070.7236328125,
+ -12787.515625,
+ -13486.6484375,
+ -13506.1884765625,
+ -14168.748046875,
+ -14274.173828125,
+ -14954.0087890625,
+ -14501.78515625,
+ -15278.6064453125,
+ -15439.5576171875,
+ -15746.658203125,
+ -16400.017578125,
+ -16510.359375,
+ -16521.248046875,
+ -16742.76171875,
+ -17047.421875,
+ -17805.130859375,
+ -17795.267578125,
+ -17976.0703125,
+ -18761.09375,
+ -18339.06640625,
+ -18822.470703125,
+ -18967.947265625,
+ -19671.513671875,
+ -19881.318359375,
+ -20037.7578125,
+ -20832.794921875,
+ -21072.955078125,
+ -21256.41796875,
+ -20843.107421875,
+ -21564.25,
+ -22203.166015625,
+ -23210.91796875,
+ -22720.056640625,
+ -23015.025390625,
+ -23964.240234375,
+ -23900.974609375,
+ -24272.765625,
+ -23932.08984375,
+ -25138.828125,
+ -25112.5625,
+ -25070.720703125,
+ -25355.36328125,
+ -26418.724609375,
+ -26760.31640625,
+ -26457.16015625,
+ -26733.7890625,
+ -27687.9453125,
+ -27883.619140625,
+ -27659.8671875,
+ -28528.62109375,
+ -28546.826171875,
+ -29324.33203125,
+ -29769.6015625,
+ -29586.654296875,
+ -30634.39453125,
+ -30992.5078125,
+ -31031.126953125,
+ -31662.3046875,
+ -30143.736328125,
+ -31403.59765625,
+ -30976.947265625,
+ -32408.2109375,
+ -31795.79296875,
+ -32150.36328125,
+ -33235.1328125,
+ -32885.02734375,
+ -33267.50390625,
+ -34477.3515625,
+ -34305.6328125,
+ -33260.9453125,
+ -35025.0703125,
+ -35347.890625,
+ -34200.46875,
+ -35254.0078125,
+ -35649.44921875,
+ -35433.8828125,
+ -36746.0546875,
+ -36420.5859375,
+ -36148.34765625,
+ -36879.0625,
+ -37232.1484375,
+ -37012.546875,
+ -38134.60546875,
+ -38314.66015625,
+ -38327.390625,
+ -38052.62109375,
+ -38873.83984375,
+ -39358.09375,
+ -38701.4765625,
+ -38625.4375,
+ -40732.5546875,
+ -41444.76953125,
+ -40099.66015625,
+ -41899.0,
+ -42390.0390625,
+ -42325.0546875,
+ -41603.94921875,
+ -42394.65625,
+ -43257.6875,
+ -43686.5546875,
+ -43342.7578125,
+ -42879.51953125,
+ -42741.91015625,
+ -45436.828125,
+ -44417.9609375,
+ -45463.98046875,
+ -44746.51953125,
+ -44717.8984375,
+ -44579.95703125,
+ -46019.48828125,
+ -47352.76953125,
+ -47270.41015625,
+ -46138.4296875,
+ -48676.421875,
+ -46563.8359375,
+ -48977.01953125,
+ -47988.390625,
+ -48077.64453125,
+ -48651.1953125,
+ -48725.796875,
+ -50530.15234375,
+ -49555.04296875,
+ -50506.5234375,
+ -50816.0234375,
+ -51247.41796875,
+ -50344.49609375,
+ -51648.97265625,
+ -51730.05078125,
+ -50479.11328125,
+ -52597.4921875,
+ -53756.82421875,
+ -53311.28125,
+ -53552.296875,
+ -53193.5703125,
+ -54374.40625,
+ -54130.9375,
+ -57880.8203125,
+ -55618.82421875,
+ -55682.5,
+ -54540.73828125,
+ -56942.734375,
+ -57105.06640625,
+ -56798.71484375,
+ -56285.2109375,
+ -55840.55859375,
+ -57822.1796875,
+ -58234.70703125,
+ -59581.0625,
+ -59402.921875,
+ -58494.58984375,
+ -57748.83984375,
+ -58256.1328125,
+ -58546.40234375,
+ -63210.40234375,
+ -60109.73828125,
+ -59901.83203125,
+ -58983.33984375,
+ -59652.23046875,
+ -60908.265625,
+ -62076.01171875,
+ -60785.171875,
+ -61412.78515625,
+ -60142.57421875,
+ -60635.3125,
+ -62631.73046875,
+ -62095.98046875,
+ -60776.11328125,
+ -62768.91796875,
+ -61400.25,
+ -64576.8046875,
+ -65236.2265625,
+ -61610.8359375,
+ -64474.84765625,
+ -63371.01171875,
+ -63888.8203125,
+ -62644.109375,
+ -63224.97265625,
+ -65382.30859375,
+ -62791.3515625,
+ -65427.171875,
+ -64584.17578125,
+ -64293.12890625,
+ -65939.8046875,
+ -65662.1875,
+ -65738.5625,
+ -65019.23046875,
+ -65290.78515625,
+ -65658.5859375,
+ -66215.9375,
+ -69195.1875,
+ -68713.2265625,
+ -66956.4375,
+ -66536.5859375,
+ -66889.9609375,
+ -68306.1796875,
+ -68102.078125,
+ -68538.625,
+ -69006.1015625,
+ -68125.71875,
+ -69788.1953125,
+ -68768.6953125,
+ -68794.8828125,
+ -68795.6640625,
+ -70940.1796875,
+ -69292.84375,
+ -70581.5703125,
+ -69366.4921875,
+ -71040.75,
+ -70315.125,
+ -70099.8828125,
+ -71996.640625,
+ -69318.421875,
+ -69479.1953125,
+ -74624.4296875,
+ -72378.6953125,
+ -72725.2109375,
+ -69802.28125,
+ -71696.6484375,
+ -72170.9296875,
+ -72634.28125,
+ -73560.1796875,
+ -70937.609375,
+ -74060.3125,
+ -73848.4140625,
+ -74304.2265625,
+ -74776.65625,
+ -74683.21875,
+ -75485.34375,
+ -75292.515625,
+ -74968.0078125,
+ -77933.8203125,
+ -78639.6171875,
+ -77838.4296875,
+ -77514.6796875,
+ -76294.3671875,
+ -75652.0859375,
+ -78112.3984375,
+ -78734.015625,
+ -80057.890625,
+ -78442.9140625,
+ -76457.875,
+ -80396.609375,
+ -80674.28125,
+ -80353.515625,
+ -80847.65625,
+ -81927.8125,
+ -81163.46875,
+ -80021.421875,
+ -80926.296875,
+ -81721.53125,
+ -81683.1640625,
+ -82269.1328125,
+ -82745.9921875,
+ -83491.734375,
+ -80758.1484375,
+ -83422.296875,
+ -81363.3203125,
+ -82491.875,
+ -82299.8203125,
+ -81300.421875,
+ -82415.828125,
+ -84603.203125,
+ -87052.6171875,
+ -87204.234375,
+ -83572.671875,
+ -84032.703125,
+ -84894.5859375,
+ -85048.7578125,
+ -84634.265625,
+ -83712.359375,
+ -89089.859375,
+ -87162.90625,
+ -87023.9375,
+ -89424.359375,
+ -88079.8671875,
+ -86936.890625,
+ -86518.328125,
+ -86045.5546875,
+ -87960.515625,
+ -88727.265625,
+ -89242.78125,
+ -86218.578125,
+ -88456.53125,
+ -88838.5703125,
+ -88498.53125,
+ -87716.40625,
+ -89834.0078125,
+ -86416.9296875,
+ -87854.046875,
+ -87589.125,
+ -87385.5703125,
+ -86698.1484375,
+ -87258.53125,
+ -90478.484375,
+ -91629.4921875,
+ -91351.6875,
+ -91580.203125,
+ -88877.0234375,
+ -89799.9296875,
+ -88382.765625,
+ -89258.9921875,
+ -90930.265625,
+ -91925.296875,
+ -91457.0390625,
+ -90063.203125,
+ -91942.1640625,
+ -91005.375,
+ -91936.7890625,
+ -90370.5234375,
+ -89266.1953125,
+ -90248.5546875,
+ -91369.171875,
+ -93080.59375,
+ -93185.09375,
+ -91542.0859375,
+ -88940.8046875,
+ -91681.9296875,
+ -91348.7890625,
+ -95162.1640625,
+ -92593.5859375,
+ -90970.78125,
+ -94636.171875,
+ -91184.6953125,
+ -95272.7265625,
+ -96726.0078125,
+ -93715.34375,
+ -94715.421875,
+ -93707.78125,
+ -94861.8125,
+ -96257.609375,
+ -93566.7421875,
+ -94057.0390625,
+ -94552.9921875,
+ -96951.296875,
+ -92254.5078125,
+ -95873.2265625,
+ -94646.1015625,
+ -97104.453125,
+ -99692.6953125,
+ -97796.4453125,
+ -95913.5546875,
+ -98847.65625,
+ -96029.46875,
+ -94617.890625,
+ -97062.8984375,
+ -97578.96875,
+ -96398.03125,
+ -100552.3125,
+ -96851.3046875,
+ -97913.9921875,
+ -97218.71875,
+ -99207.5703125,
+ -98987.15625,
+ -100393.3671875,
+ -101432.3359375,
+ -98203.03125,
+ -99664.515625,
+ -97944.390625,
+ -97301.0625,
+ -99361.796875,
+ -97414.0,
+ -99409.3359375,
+ -101722.84375,
+ -99130.171875,
+ -101044.078125
+ ],
+ "manager_loss_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "q_taken_mean": [
+ -0.038346746609295684,
+ 0.06931478323063381,
+ 0.08008451234131364,
+ 0.16103294136597937,
+ 0.2088011070474016,
+ 0.2361414727347838,
+ 0.24165538182550014,
+ 0.28531573737157534,
+ 0.3826424638548126,
+ 0.34678029341053773,
+ 0.42101779372805875,
+ 0.433895531727777,
+ 0.4833551677885897,
+ 0.5416639477805469,
+ 0.5587883239340201,
+ 0.5529323133738908,
+ 0.5894469007269951,
+ 0.5866262027071102,
+ 0.6111448497511803,
+ 0.6457053571428572,
+ 0.6017672787336217,
+ 0.6801388346989206,
+ 0.7014069365976145,
+ 0.7213642018948109,
+ 0.6693146516393442,
+ 0.6660808408670978,
+ 0.6721036853641367,
+ 0.6439885503736069,
+ 0.6880593279621767,
+ 0.6697568761288866,
+ 0.6586962769177903,
+ 0.7483509908045607,
+ 0.6912405239780924,
+ 0.7318529637148543,
+ 0.726810350258284,
+ 0.6710916916414175,
+ 0.7590140789854606,
+ 0.7173573391496643,
+ 0.7075652788384912,
+ 0.7675521367452891,
+ 0.7396706938151688,
+ 0.7557378288165916,
+ 0.7992968251435865,
+ 0.8498394830949285,
+ 0.8578941765885649,
+ 0.9476962984647411,
+ 0.9726256227874656,
+ 1.0115850915605096,
+ 1.1005408262849707,
+ 0.9996337473729148,
+ 1.0474622790002543,
+ 1.0536028449967298,
+ 1.0572979912156053,
+ 1.0824650969799532,
+ 1.0241346122541997,
+ 0.9745940854496662,
+ 1.0306057610265162,
+ 1.1144625776004053,
+ 1.103831898956419,
+ 1.176906245910221,
+ 1.049001646017125,
+ 1.0959068548387096,
+ 1.1343395199081645,
+ 1.2146841615065325,
+ 1.1839398483382275,
+ 1.1132595570981618,
+ 1.2583939112802205,
+ 1.2903319051768656,
+ 1.145361474183899,
+ 1.3623549781732707,
+ 1.1477440097184988,
+ 1.2474034749034748,
+ 1.151983342271082,
+ 1.0844016381441957,
+ 1.149545733254398,
+ 1.1338258302736983,
+ 1.0482990974759778,
+ 1.1196562142675646,
+ 1.1567248197349618,
+ 1.1610145231699127,
+ 1.1486286589700792,
+ 1.224136162179908,
+ 1.1418467717867091,
+ 1.1085422516379195,
+ 1.1038416192207436,
+ 1.2540309497197384,
+ 1.1144985876883082,
+ 1.163357099514563,
+ 1.0609280208333334,
+ 1.0816343626806832,
+ 1.0600682528864658,
+ 0.989406862745098,
+ 1.0613008598993288,
+ 1.010186217193809,
+ 1.12129824174544,
+ 1.0796127016662258,
+ 1.12018180373394,
+ 1.0968162546829008,
+ 1.1103513799893205,
+ 1.0565664543858486,
+ 1.0952739782465393,
+ 1.0378883092495284,
+ 1.1639414031555322,
+ 1.1142860658965608,
+ 1.07987687837038,
+ 1.0600515043195822,
+ 1.071800796006476,
+ 1.1671205832889304,
+ 1.0902798039082644,
+ 1.1807962250975905,
+ 1.19358314120556,
+ 1.1490830243644068,
+ 1.1923797582304527,
+ 1.106547103861518,
+ 1.202264350427922,
+ 1.078472245676844,
+ 1.022611993018617,
+ 1.1536196793712616,
+ 1.190514701146464,
+ 1.2155380418244524,
+ 1.0826940582238178,
+ 1.1037358312865109,
+ 1.3003998933617702,
+ 1.1820658576814327,
+ 1.112369146230107,
+ 1.2391586397298708,
+ 1.2546002394153226,
+ 1.1709355775803145,
+ 1.148637515456202,
+ 1.183043969785772,
+ 1.1786166316246163,
+ 1.1981893989431969,
+ 1.1084695577206374,
+ 1.2495555579144022,
+ 1.1209241660676947,
+ 1.1488346954693434,
+ 1.1329336764412195,
+ 1.1686919120718462,
+ 1.1372296362532055,
+ 1.139066301703163,
+ 1.2302596788081621,
+ 1.074099475969701,
+ 1.1340603239926987,
+ 1.1244731180915069,
+ 1.238785948639015,
+ 1.1503454521062517,
+ 1.2037004373773708,
+ 1.0774897215312327,
+ 1.1050115107735898,
+ 1.2043614608213906,
+ 1.271551022401667,
+ 1.236955088553197,
+ 1.2568672942910748,
+ 1.2379470509725246,
+ 1.1265108684427922,
+ 1.2020533980582524,
+ 1.2538579389462674,
+ 1.1310600498020686,
+ 1.2275551723291438,
+ 1.245250372361093,
+ 1.1475514305738785,
+ 1.2830664915831407,
+ 1.2362302251542603,
+ 1.1839920135116784,
+ 1.2443737879767292,
+ 1.326594539621643,
+ 1.2746958001328021,
+ 1.3035446762406526,
+ 1.2738729884481839,
+ 1.2992810265492585,
+ 1.327189674017105,
+ 1.372399337594515,
+ 1.2845613663056243,
+ 1.3358725495017048,
+ 1.348265116395411,
+ 1.3074255021601755,
+ 1.3138273506781428,
+ 1.3349901373823836,
+ 1.3086429811345475,
+ 1.2713925700810416,
+ 1.4048313111876851,
+ 1.342687061330214,
+ 1.308103144528653,
+ 1.3862840864504316,
+ 1.3609961864127569,
+ 1.3932231034923883,
+ 1.286046029111338,
+ 1.3373085393110702,
+ 1.5301647448401712,
+ 1.323023687214612,
+ 1.3844604857050034,
+ 1.450296157363501,
+ 1.412920566712049,
+ 1.4378945480631278,
+ 1.4377807343234323,
+ 1.4140061275321065,
+ 1.502049633553147,
+ 1.451646972841932,
+ 1.4719633827917773,
+ 1.50927756563999,
+ 1.4443042236490995,
+ 1.5768178885089836,
+ 1.5052708626647895,
+ 1.4312559582511506,
+ 1.5149254108297414,
+ 1.587135697162748,
+ 1.4855769392775307,
+ 1.5493841826986428,
+ 1.4344909815436242,
+ 1.446152138478007,
+ 1.3695926664240339,
+ 1.4872832369942197,
+ 1.3395160832468207,
+ 1.36502767188336,
+ 1.3601053011150006,
+ 1.2977208587011668,
+ 1.435389295041269,
+ 1.3957898739235386,
+ 1.3623402211417817,
+ 1.3680474750066207,
+ 1.4737243757451317,
+ 1.4253150383204507,
+ 1.4510761690706173,
+ 1.4973401017344896,
+ 1.6176164491449145,
+ 1.5120344209379473,
+ 1.4854616889928987,
+ 1.6336518193691312,
+ 1.435539289567204,
+ 1.4861654098231207,
+ 1.6061861082172157,
+ 1.5733574241730945,
+ 1.4828769004753224,
+ 1.5832088815789473,
+ 1.4751416574531737,
+ 1.418255428050087,
+ 1.417734293662155,
+ 1.342964221306196,
+ 1.4862042974732006,
+ 1.460891455411144,
+ 1.4724510919767748,
+ 1.4124382718543902,
+ 1.5419273522199277,
+ 1.5810577620967743,
+ 1.5370686700182816,
+ 1.4878773438494843,
+ 1.481181613343089,
+ 1.348774685115759,
+ 1.5122287175015854,
+ 1.5215505324191103,
+ 1.5140683916265765,
+ 1.4856829533811475,
+ 1.6033097280540611,
+ 1.387802852676827,
+ 1.4829622541441916,
+ 1.4336918969849246,
+ 1.4210105762824783,
+ 1.4348716952398894,
+ 1.5377239141084258,
+ 1.3830288244220585,
+ 1.3422130343835796,
+ 1.422248058308941,
+ 1.3699207224182168,
+ 1.3187278741744983,
+ 1.4288187092833877,
+ 1.2626054465973535,
+ 1.3579937452615618,
+ 1.3081800992192583,
+ 1.304267745015536,
+ 1.2123058372792423,
+ 1.2618429748694517,
+ 1.279807522195059,
+ 1.2199565079416743,
+ 1.3111688771568375,
+ 1.2227504664789839,
+ 1.2551335717710999,
+ 1.2371737234502347,
+ 1.1918155369939654,
+ 1.1831260579859266,
+ 1.2228529035496827,
+ 1.2133676693239401,
+ 1.1191334754575708,
+ 1.1567714143916525,
+ 1.1394262119615335,
+ 1.0640584667010842,
+ 1.0494755279771824,
+ 1.045004462514875,
+ 1.1789994830641986,
+ 1.2469806710459346,
+ 1.1349667821363518,
+ 1.1881898573422487,
+ 1.2009061825225453,
+ 1.2075729615945705,
+ 1.2076402377359703,
+ 1.2135188342426186,
+ 1.1695324604583601,
+ 1.1767631197681907,
+ 1.2401882713010373,
+ 1.1914813586678599,
+ 1.1511896042596725,
+ 1.177431565696931,
+ 1.2162069322379314,
+ 1.1778876708610118,
+ 1.1648151041666666,
+ 1.1788185268144902,
+ 1.253053418464467,
+ 1.1675224777211968,
+ 1.226866700025416,
+ 1.2428164148708296,
+ 1.149745488473167,
+ 1.2018917382516423,
+ 1.1930020905420993,
+ 1.2247911821096304,
+ 1.2840267070484581,
+ 1.2450856250788445,
+ 1.261478036867934,
+ 1.2788789335664337,
+ 1.2222008945656333,
+ 1.222939828241815,
+ 1.260348972148541,
+ 1.331102754440327,
+ 1.2789090534089453,
+ 1.2090135353468743,
+ 1.2434235992651883,
+ 1.184905938753072,
+ 1.2771411668154002,
+ 1.2899882486639729,
+ 1.2079410321189494,
+ 1.2447626915107373,
+ 1.2970952700746365,
+ 1.3118074612036337,
+ 1.3452216844945097,
+ 1.2883736725117312,
+ 1.3915904529816514,
+ 1.2950575520162328,
+ 1.2816378380788689,
+ 1.3106586828751285,
+ 1.19251498037753,
+ 1.3319928327890895,
+ 1.2961823318854568,
+ 1.346227770234468,
+ 1.2478823768048748,
+ 1.1999575864276568,
+ 1.249998209055018,
+ 1.2077482173370138,
+ 1.2463667156315652,
+ 1.185224358974359,
+ 1.225886688492845,
+ 1.2145508956462319,
+ 1.2358586998581194,
+ 1.1505597925068296,
+ 1.2067753924246414,
+ 1.1481119200194678,
+ 1.1421654065964868,
+ 1.091154262775583,
+ 1.042217497745136,
+ 1.0896941796411137,
+ 1.1036510327728886,
+ 1.0605209077187743,
+ 1.060428574849917,
+ 1.0167144627149796,
+ 1.0427722205996885,
+ 1.1212343929162951,
+ 1.01045948833355,
+ 1.027147475369458,
+ 1.0156028000929491,
+ 1.0294898416311506,
+ 0.9602924246615465,
+ 0.9577996845425868,
+ 0.9779010586319218,
+ 0.9952791779759597,
+ 0.9655361846383975,
+ 0.9321846442558747,
+ 0.9999496098303329,
+ 0.9983126547836287,
+ 1.0638603322108535,
+ 1.1153745713477161,
+ 1.167331622033565,
+ 1.0845544131989597,
+ 1.1913145162636922,
+ 1.2082319245629591,
+ 1.1389458198051947,
+ 1.1763814577136946,
+ 1.1302704647456745,
+ 1.1436411041417762,
+ 1.114732671313955,
+ 1.1339619604714357,
+ 1.186362892141009,
+ 1.1698883841975947,
+ 1.2074006861536466,
+ 1.2329386295180722,
+ 1.2371117387665767,
+ 1.1573743720641962,
+ 1.1921064579090292,
+ 1.0975192578732504,
+ 1.1691076347651095,
+ 1.2142445888874225,
+ 1.2549108007358636,
+ 1.1657666289261046,
+ 1.1823409693297993
+ ],
+ "q_taken_mean_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.84223300970874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.30339805825243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 22.910891089108905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631066
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.22029702970297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.556930693069305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.66990291262136
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.52475247524751
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.121287128712865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.178217821782173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.188118811881168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.336633663366335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.41584158415841
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.12621359223303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.53712871287128
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.14108910891089
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.70388349514563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.61407766990292
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.5728155339806
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.386138613861373
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.951456310679646
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.017326732673254
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.95145631067964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.128712871287114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.864077669902922
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.841584158415845
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398061
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.940594059405925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611665
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.18446601941749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.242718446601955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.549504950495034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.086633663366342
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.1980198019802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233012
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.592233009708742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.297029702970292
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.769801980198018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.999999999999993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386137
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.90099009900989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.01980198019802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039603960396033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.059405940594047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.90099009900991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.631067961165055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.55445544554455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079209
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.5728155339806
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.517326732673258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.70631067961166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.95145631067961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.95049504950496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.941747572815547
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.133495145631073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.14356435643564
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.079207920792072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.371287128712865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.592233009708746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.51485148514851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.962871287128714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.49271844660197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.361386138613856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.048543689320404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.252427184466036
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.217821782178213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.95145631067964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.951456310679596
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.742574257425737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 25.443069306930695
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.925742574257423
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.195544554455452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.539603960396036
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.72772277227723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.2621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 36.009900990099
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.96534653465346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.980198019801982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.82178217821781
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.980198019801975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 30.019417475728176
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.68316831683169
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.54950495049504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.33168316831682
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.5728155339806
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.2621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.396039603960393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.68316831683168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.21534653465346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.079207920792076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.300970873786405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.871287128712858
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.960396039603967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.198019801980195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.64356435643563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.438118811881182
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.98019801980196
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.1188118811881
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.188118811881186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.2549504950495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.522277227722775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.14563106796117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.952970297029694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.95145631067963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.940594059405935
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.90099009900991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.851485148514854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.00990099009901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.237623762376238
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.18446601941749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611647
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.816831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ }
+ ],
+ "return_max_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.581779294434297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.252784914677623
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.334161753553997
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.170426144556203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.095278323342042
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.885750422736448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.196690831097673
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.255937324653667
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.131319266487388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.151241550865958
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.586408147977956
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.321238391599048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.880002954119709
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.476072178126275
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.171950063599724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.720468404546768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.047324968407333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.55425538306258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.390447695948515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.329065050419322
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.616679984139193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.274100546746602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.35119559789273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.9457037633375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.304112567116627
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.433057314053181
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.407358454292032
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.412571150492305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.377490982454699
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.557005718331721
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.813813381693372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.400348686047524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.892497528185553
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.715679113854522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.383240616566987
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.91386570609211
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.444228681688914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.028445034170549
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.368402612520313
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.503505672613203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.61910025954052
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.956888693780197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.943356683969368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.432792673988274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.351910590415557
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.873724469360948
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.708975943880237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.53205512609288
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.568714439058335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.02427198437839
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.582893966185967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.795905955886854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.961681586706879
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.688382180676866
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.335861301189954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.480650327515418
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.87071080114253
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.511726268585317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.463119566359437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.175839383088798
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.301841038698587
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.123562160065635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.520245864900332
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.900195966198565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.638837869570594
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.09325320534828
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.826710501320592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.537312954596432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.607948791605287
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.108074727375653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.567568137768847
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.467885353956488
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.085386345066548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.980218558376501
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.657484759539404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.807401543383056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.750075025262415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.208853015155888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.761273861073171
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.380579274553673
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.055189980683233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.327598506915491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.002872500427884
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.450768338292399
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.868517774389769
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.52080597926298
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.901258815201912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.717117572540893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.397929013097746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.555322955811052
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.053563275016824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.90641807766075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.436648346044649
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.16285480836572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.610992133306809
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.924113739551878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.008463010446455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.22658281153245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.155421048779637
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.992987962998155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.294633256506959
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.374543257897223
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.671272846338034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.624247338088969
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.595730598545293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.653308322337091
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.206321018758008
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.625204131456837
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.304137710494352
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.881898823984342
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.403325348904277
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.467838604248774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.840060340985557
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.963480886923644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.168400942035955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.35064809681465
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.625361565282747
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.911836784406598
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.040725386252218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.184247824054255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.69676764752565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.46433038328105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.118371017060099
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.798434289974495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.175265157188559
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.027789316726404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.592768297372013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.205639054253137
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.825217971693148
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.380201708362302
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.584236518312027
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.222311207187552
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.89124073641761
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.901918107254396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.88939013567196
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.261248530799689
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.385053189785003
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.236341428791786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.502378143379929
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.793100404874057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.125708657030755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.49814842788024
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.099296848894326
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.165446740542198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.487021576960139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.507108326123877
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.003134111954886
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.758894812851606
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.007968636669494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.785977774268412
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.406371484969679
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.982983102401782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.29374997139104
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.09444030568106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.517135910886514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.386839533467914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.114754604552804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.662781855508715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.451715078754757
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.110536372558007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.05472929320478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.800484933907706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.720562354553364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.891733954948895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.377329484855563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.433120400306668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.370715378576696
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.670075604309135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.501116750891459
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.147399244952236
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.655640060224133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.675102734788048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.496891270183692
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.194973948677811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.023462497081889
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.348090415555443
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.157873734991476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.592806972049656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.035748218736156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.45037360445476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.788181522729252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.060844059964818
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.968262173925684
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.047975515304653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.007272827664186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.457291429770496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.170278262222897
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.346590440944237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.12448988887374
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.905106231082499
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.188867421964597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.391479764307917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.773937918176559
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.432860797086711
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.891207729657593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.372720723875442
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.320505503220229
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.190981710861795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.369807043984569
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.463661609746277
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.51912966955503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.1912543802924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.319928819782291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.971137574358025
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.731914271524241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.634598220772633
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.99870415699684
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.67640777257902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.958975943880231
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.980700027272992
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.521463302710302
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.768961295824775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.238224550610404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.988769802566337
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.00177847289132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.761841679140177
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.126462919350192
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.103089396144888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.691696362770816
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.23861096001857
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.180387668621233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.256415539138574
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.942031516778586
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.721792563271585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.806349979954186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.1900946899475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.549461035736034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.52553292777267
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.43257470874959
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.655918136023036
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.03197709680816
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.480510000549295
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.071631122890377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.847753395311797
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.509667111822145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.081115772731035
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.862971660679278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.789623895641993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.00125441852438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.74931166833743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.749485038656433
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.931333624000407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.613322107176048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.40700384618906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.097054049198267
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.365736823456563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.591263165845021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.683965076466041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.0428658009064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.068969487968218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.651912099372053
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.317233771684064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.778205291056153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.448024093324731
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.771813155445313
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.383162759540522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.589099386199575
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.876379524221496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.391636412338846
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.483697401688692
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.449506323725302
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.29308309645112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.549613092377202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.381358056055259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.66493922953745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.804487137123909
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.601660664506122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.638283574391068
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.943078890933478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.212868942589267
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.366222483898877
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.571854712407069
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.488657404228636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.928157889492482
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.870007449774103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.930839118065762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.530514180946867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.73556806530168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.222254025853353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.033507571690157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.180462278704786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.870740411419789
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.167473868572714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.620479926806834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.313576161924448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.851659230569046
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.239763447692157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.373693824759625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.9827984071199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.695736520600743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.97382512197467
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.120828917784038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.533379501222681
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.970567677475778
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.22820569158164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.161435608396266
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.201695160636124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.841338550463925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.99530655648783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.654708392044512
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.90057069160453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.42042695015861
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.682751281876948
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.914610540457655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.619357341017464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.342177619964227
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.015146014428078
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.10202248765014
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.594762757309024
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.378776023910817
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.29993565844102
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.428745943248973
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.75967383492857
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.01375704924862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.731024173483053
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.107231198960946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.118039798448123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.608586615639723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.58595391439283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.664239384511506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.391416824588148
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.264244259897561
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.359943345669521
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.836883848752423
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.122777941802505
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.494678628116187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.442383675675174
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.459256637731965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.925135917859121
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.407031391038702
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.24392954776179
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.10049091384641
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.591956088248248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.139542009348826
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.711442126355493
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.66869283719753
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.134509190343449
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.522311516612234
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.350756518133187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.510982115853073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.669370928944492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.483209505755276
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.411071897758434
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.835014396030452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.104149501288552
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.981854693357688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.075440140254715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.126244510847018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.30408631007603
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.52314892953509
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.80081496191296
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.154364184346452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.128443542317765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.929714063921637
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.451547859362915
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.56161111284983
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.406768966598467
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.534432747192893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.664690585676318
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.226058016689327
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.24261645232033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.5066354199567
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.073386139758224
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.359919214008782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.06082762402454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.524897083578312
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.408103291984204
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.07201969914761
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.976630195959961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.814766116001337
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.075439548115702
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.762032643971752
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.967342616928047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.438612745636563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.505534547040005
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.837039052379582
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.14631854135483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.246833846005963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.347813633813146
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.06842084131454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.66472283063952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.129729375672596
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.977439972672723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.834731802270925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.77721151546178
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.442277124730508
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.03901590315065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.455195359168371
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.449717028741713
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.426197742867219
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.892227812191923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.317739851429266
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.79373570682876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.446026861661108
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.452315738248583
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.694070387790644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.360388830145157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.907928225087131
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.225113674722635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.206279941608976
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.464404554589578
+ }
+ ],
+ "return_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.3834951456310676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.320388349514563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7111650485436891
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7747524752475248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.217821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.310679611650485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0097087378640768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.5693069306930694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.73019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.8886138613861385
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.7079207920792077
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.16747572815534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.4801980198019802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.485436893203883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.512135922330097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.466019417475728
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.049504950495049
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.385922330097087
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.929611650485436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.7378640776699026
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.4368932038834945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.8316831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0693069306930694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.839805825242718
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.725728155339805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1138613861386144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.126213592233009
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.185643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.168316831683169
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.044554455445544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.990099009900991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.861386138613862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8058252427184462
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7821782178217833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.482673267326733
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.407766990291261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.861386138613863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.4504950495049505
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.485436893203882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1039603960396045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.596534653465347
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.2277227722772284
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.643564356435645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.990099009900992
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.757281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.032178217821785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.668316831683168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.334158415841584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0074257425742585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.475728155339805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.168316831683168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.696601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8440594059405937
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.519417475728154
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.316831683168318
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.4851485148514856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.708737864077668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.038834951456311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.7920792079207915
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.683168316831685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.684466019417474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5990099009900995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.185643564356438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.111386138613863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0995145631067955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.542079207920794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.410194174757281
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.861650485436891
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.087378640776698
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.400485436893203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.067961165048542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.910891089108912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.9331683168316856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.237864077669903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.9126213592233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.532178217821783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.089108910891088
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.675742574257427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.623762376237624
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.50990099009901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.507425742574258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6868932038834945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.358910891089109
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.058252427184466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.951456310679611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.208737864077669
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.482673267326734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.314356435643565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.198019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.427184466019416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.201456310679611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.543689320388348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.405940594059407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.970873786407766
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5396039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.072815533980582
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.696601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.02970297029703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.766990291262135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.126213592233008
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.677184466019416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.783980582524271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.635922330097086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3567961165048534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.4529702970297023
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.032178217821782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6699029126213585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.554455445544554
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.434466019417475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1336633663366333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.606796116504854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834951456310678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.396039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5643564356435644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.024752475247525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.2252475247524752
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2574257425742585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.27970297029703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.257425742574257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.336633663366339
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.633663366336635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.233009708737862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2549504950495045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.524271844660193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.514851485148515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.689320388349514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.87135922330097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.388349514563105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.495049504950495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.16831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.359223300970872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.713592233009708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.9702970297029703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.448019801980199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.638349514563106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.81930693069307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.378640776699028
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.446601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.1359223300970855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.555825242718445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.004854368932039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.450495049504952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.55940594059406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3638613861386135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.995049504950496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.903465346534656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3033980582524265
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.173267326732672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.752427184466018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2574257425742577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.391089108910893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.297029702970298
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.8349514563106775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.851485148514851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2111650485436884
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.133495145631067
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.20145631067961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.195544554455447
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0703883495145625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.805825242718445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.048543689320387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.594059405940595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.854368932038834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.749999999999998
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.856796116504853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.386138613861388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.975247524752478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.556930693069309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.561881188118813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.584951456310679
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.990099009900991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.935643564356435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.543689320388348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.873786407766989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.864077669902911
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.300970873786406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6881188118811883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.980582524271844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.63861386138614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.961165048543688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.861386138613862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.522277227722773
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.747572815533979
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.490291262135921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6116504854368925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.888613861386141
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.475728155339805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5679611650485428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.663366336633664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.584158415841585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1485148514851495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2574257425742577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.565533980582523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.62135922330097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.145631067961164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.067961165048542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.592233009708737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.900990099009902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.320388349514562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.910891089108911
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.53398058252427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.435643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.45145631067961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.987864077669902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.458737864077668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.237864077669902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.791262135922329
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.133495145631067
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.296116504854369
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.183168316831685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2038834951456305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.4029126213592225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.553398058252427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.138613861386139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.683168316831685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.376213592233008
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.650485436893202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.922330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.174757281553396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.126237623762377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.859223300970873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.504950495049506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.198019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3592233009708727
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7896039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.983009708737862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.446601941747571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9777227722772284
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.997572815533979
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7451456310679605
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.555825242718445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.056930693069307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.6771844660194155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.445544554455446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.631067961165047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.924757281553396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5097087378640768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.341584158415842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.400485436893202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3861386138613865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3446601941747565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.737623762376238
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.08009708737864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.985436893203882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.660891089108912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.702970297029704
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.78640776699029
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.9653465346534675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1831683168316838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.62378640776699
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.638349514563106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.435643564356438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.262135922330096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.534653465346537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.291262135922329
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3465346534653464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.331683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.069306930693069
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.427184466019416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.16504854368932
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.98019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.303398058252426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.38861386138614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.056930693069307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.33910891089109
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.589108910891091
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.963592233009708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.145631067961164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.269801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.376237623762376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.235148514851486
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.257281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.929611650485436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.439320388349513
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.091584158415843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.300970873786406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.25742574257425743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.820388349514562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.881188118811882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.157766990291261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.524271844660193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.024752475247524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.522277227722774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.933168316831684
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.6485148514851495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.814356435643566
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.699029126213591
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.359223300970872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.184466019417474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.946601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.592233009708736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.106435643564358
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.655940594059407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.521844660194174
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.027227722772278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5717821782178225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.831683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1039603960396045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.121287128712873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.961165048543687
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.405940594059406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.322815533980582
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.347087378640776
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.946601941747571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.905940594059409
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.524752475247525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.277227722772278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.218446601941746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.262376237623762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.184466019417474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.466019417475727
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.864077669902911
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.378640776699028
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.886138613861387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.247524752475248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.038834951456311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3886138613861405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.553398058252426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.4854368932038815
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.948019801980199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.300970873786406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2673267326732685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.834951456310678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.163366336633664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.247524752475247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0742574257425748
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.601485148514852
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.9108910891089135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.631067961165046
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.36650485436893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.055825242718446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.613861386138615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.992574257425744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.373786407766989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834951456310678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.699029126213591
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.172330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.732673267326736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.331683168316833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.298543689320387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.271844660194174
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.747524752475249
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.3564356435643585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.193069306930694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.483009708737862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.839108910891092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.388349514563106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.203883495145629
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.650485436893202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.0618811881188135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.458737864077669
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.468446601941746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.038834951456309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.024752475247525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.514851485148517
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.36881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.300970873786406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.990099009900993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.883663366336633
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6990291262135923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.8217821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.257425742574259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.033980582524271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.958737864077668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.831683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.306930693069308
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.396039603960397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.754854368932037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019417475728154
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.788834951456309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.752475247524754
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.567961165048541
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.902912621359222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.190594059405941
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.504950495049507
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.458737864077668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.847087378640775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.3539603960396045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.544554455445548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.688118811881189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.126213592233008
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.6601941747572795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.257281553398056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.336633663366338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.933168316831685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.900485436893202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.060679611650484
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8689320388349504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.589805825242718
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.93069306930693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.320388349514562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.436893203883494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.544554455445546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.750000000000001
+ }
+ ],
+ "return_min_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5880169212126636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1216904123997486
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.356128246352253
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5139566599664493
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8263411745463283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.187341214556344
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.632016426635802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9880546510307644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.758465185071705
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.487132212316425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.817037930353118
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.199762169409591
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.009963725231867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.897677829201199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.71622152119678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.923898482626088
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.437294211472871
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.323178572233951
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.977893187851492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.579203768010094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.765788046942767
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.110020857633964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.321391370079828
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.468543366892383
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.811323305338234
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.996786187701629
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.836853953243447
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.678048590737435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.143668787530503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.214911878360981
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.281601597976186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.957360902345843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.040456250002856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.326524379353837
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2748508132927014
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.910157215310893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1846180842149865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.799823093072245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.886600781101114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.057948865283936
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.179431996739207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.890259423283988
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.683388718913342
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.401556001499975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.134045236532697
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.237756576908622
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.202032598090279
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.807267072714448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.286401258892511
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.145083087514467
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.985688071980274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.091534706621925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.738131603411186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.988579616044543
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.947533092472835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.208474174480193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.084593871866964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.933664640956228
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.464481149118866
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.073081429880954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.787959754270194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.927723147781015
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.726438005688073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.405612068066896
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.05208002110984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.927444761456401
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.994528525163298
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.399191561020055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.619806071469864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5384004011874755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.856384293785262
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.992854125921711
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.306016471400462
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.706457767614833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.85945952896351
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.087502619851587
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.057077129797766
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.003871854428297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.905847920837072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.933213976570083
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.355265697754794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.697797640877019
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2590977701030655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.170933998926724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.651543421117933
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.189225043326061
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.778074623733829
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.332003940674125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.612596918382722
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.155893390430048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.023345990373234
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.020724011545547
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.281483831655749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4117545720470215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.973986144796282
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.614826932884734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.282861536833945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.465484530503834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.988517180967153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.09216570052309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.322744505260272
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.772617607138144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.870397760105417
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.566105024371627
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.045242527268527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.740457161243256
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.616677754056598
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.202300222125345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.178877366301476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.050010999163639
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.375763988608549
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.926664303428453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2784721894946305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.147032590473291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1038656076551545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.965368165475164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.526478113923491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.425134038556064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.040046627536972
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.18287161954052
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834360250443749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.4887503485186615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.180041065580743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.109141843341002
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.641175097209241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8417247586283425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.717537943368313
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.153945846914122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.983407641872006
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.810534434404208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.622452683418292
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.802264703863381
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.890396801756642
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.137593228554352
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.477348544056058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.86986958376339
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.952207987559108
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.631464116767191
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.011409968187926
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.061019292794523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.639227842946944
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.588755491125725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.666326991192057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.269967867535247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.877855925131011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.714862761366802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.250768034666533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.213287337819375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.815811395863157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.392513930036217
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.759517883856781
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.285714783798533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.698145505382315
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.716801460411743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.566500077219675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.380113244333406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.040905141430304
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.054353048216515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.889580127163237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.131554575106792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.905165453744069
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.668042052896021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.301573777403068
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.799262628862984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.024734425364274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.009594045394512
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.677420656746053
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.032648102201195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.876432858165473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.213862936262912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.315422497817043
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.953074711633309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.788388991734677
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.912323058816291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.177718866291995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.192893264967941
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.729534227554158
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2099729962759325
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.674169975284033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.041280212061058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7948031300636655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.275653226950289
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.507085866349478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.958806927726803
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.271478751798492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.970977534600806
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.850345671792708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.704318186395115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6868278093221
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.989330672958888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.111885825695472
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.594739649539404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.88954065905242
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.952700183695007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.577069640463965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.856966286243436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.293151258230689
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1209295522294465
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.872777563266504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.729952712388444
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.893054692034658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.82817208342767
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.145215741668431
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.626631890943172
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.943830910789371
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.029335834432154
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3223167175876975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.268057921220968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.724549231665999
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0001722068337955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.815846989576582
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9647858409368055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.761060247676914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.14231832369958
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.748011531364534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.320265368056237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9019792902554995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.25307772709931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.618762813154929
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.769602274869133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2346092063359855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.102846338293436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1433808633993205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9859174056330895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.262645585779118
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.116190005697538
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.206586597610375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.205589094655088
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7072306827906285
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.680709039755243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.335696170239356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.926852723342151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.900319327131658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.725108534814423
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.670847921639774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.85064755355707
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2416317649276305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.510754885770588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.945963198303477
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0844140650919085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.985095671246702
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.238003832321645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.941393245940287
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.837224869887537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.127902521251683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.398785292660537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.011677395425703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.02869366967087
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.746521312177693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.995554053980314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.240316672562432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.771099748533079
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0841208218611635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.181791780264795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.389579265857693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.126812831840004
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.639077215794168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8903119948184495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1016171762860685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6564431133918
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.169423030315964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.781720648159808
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.844609221638168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.787956003825343
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2572011913858
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.090999069215906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.600220059998065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.678343686652092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.842246665462265
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.977771726059551
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.564516924102895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.270569622182949
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.075493221863354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.093426659335908
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1141375528198205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.176285079105844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.920676143496432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.196197576182032
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.254978868638274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.636877984128015
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.828191255365449
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1558146026202785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3352187215357985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.758950443909064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.028422421356478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9275400106809
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.309467652410766
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.872625477704868
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.341047061444675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9746026932161325
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.608230121034649
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.924832883139942
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.35179293758415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.276223659188932
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.086147387875874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.595877932280172
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6026395633553205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.188410179895538
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.030258885665461
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.085997742302305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8002625548385955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.55181734588214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.439378670232491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.30932622447203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.07731924518487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.083400338339993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019708756857491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.961851363423986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.455673221493154
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.654260208665453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.998551601137394
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9695466622227435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.849594065145345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.792106358932065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.622955101906102
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.163393108206504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.568502310463618
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019146408972888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.863803461858809
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.479700730229577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.736650311826673
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.502414948468356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1908366273267115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.119689089570448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1662054539696545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.758585397245201
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.199232015683044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.79950329101396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.916910483327194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9390904579426635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.712560885551489
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.573740939137966
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.965667872024541
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3958992202416685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.311817711354636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.756760875862128
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.119610140546905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.283917106935254
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.949193884937589
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.955895378267114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.315127505217133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2369492586543895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.747898239567152
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.365000571819041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.10726066774919
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.983205695676415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.618768210631988
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.284888891709589
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.078912180657046
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.288976301358585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.868362635073418
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.750996778449335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.067467179141872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.948065721205803
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.294985228086764
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.472743123759164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2133370133119525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.78919653189484
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.363444152324845
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.349295430567005
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.941742824254209
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.980896415605638
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.809565514280653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8972823071245655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.344220193954268
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.311129312307862
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.357974730066742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.540023632482415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.916767713270787
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6736739989842455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.969383376605188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.490443861467016
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.420395453609239
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.472378906104464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.839307933772527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.275632525667595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.839015380611474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.163765567350908
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.036776278457445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.803323838499884
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.029538890588375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.489595384858156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.426653606799766
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.092947394753486
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.958042503217533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.94772590450071
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.518567027236927
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.182805659946843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.149598179737611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.728423143292867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.843805804888602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.739547497578703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.592568419084039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.488648571688827
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.728751675282037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.797186991887531
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.529675331894973
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.87908836811765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.517411311410208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.012213469994597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.039494360309248
+ }
+ ],
+ "return_std_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "target_mean": [
+ 0.011481122545650028,
+ 0.05957879896566901,
+ 0.08964089127397544,
+ 0.14473099226804123,
+ 0.19174053804694138,
+ 0.2329027062964748,
+ 0.23073610706700465,
+ 0.29793589469178083,
+ 0.37219013821953356,
+ 0.3579409623459298,
+ 0.43050938190742427,
+ 0.4703670426870518,
+ 0.4542308509397494,
+ 0.5312895203253402,
+ 0.5372921318783414,
+ 0.5760836456372681,
+ 0.6165970758365423,
+ 0.6129327197488584,
+ 0.5885912279092765,
+ 0.6138169111394558,
+ 0.6188179283011067,
+ 0.6532192905449343,
+ 0.6890831469711237,
+ 0.6984617642829755,
+ 0.6476845799180327,
+ 0.6748282219216049,
+ 0.6866497544157174,
+ 0.6767478193072441,
+ 0.7039737914897722,
+ 0.6969970608631144,
+ 0.7017238840162325,
+ 0.7113280243493945,
+ 0.7115597031124766,
+ 0.6936853053986392,
+ 0.6992080209776994,
+ 0.7018658505961006,
+ 0.7169217222079259,
+ 0.7454723011363636,
+ 0.7269036807676982,
+ 0.7362106020061531,
+ 0.7482080029636295,
+ 0.7957770466141002,
+ 0.7827550654116145,
+ 0.8738391986345904,
+ 0.8815680512224627,
+ 0.9148773947762165,
+ 0.9831406926052183,
+ 0.9909494426751593,
+ 1.0342052293428758,
+ 0.9662419545514251,
+ 1.0817187301259221,
+ 1.0639699149771091,
+ 1.0799047886255007,
+ 1.097533499576933,
+ 1.0324238589008985,
+ 1.0168432550072,
+ 1.0454139250787609,
+ 1.057019669327252,
+ 1.1414686023234444,
+ 1.1137328638267243,
+ 1.0637361345355474,
+ 1.1023987903225807,
+ 1.1169469036985784,
+ 1.1750584767964072,
+ 1.1627658043579818,
+ 1.1574876156954765,
+ 1.2100571135538019,
+ 1.2185613945907565,
+ 1.204016899141631,
+ 1.3142312374076561,
+ 1.1647078166890081,
+ 1.1899464084620335,
+ 1.1251211833020511,
+ 1.1027002131051535,
+ 1.15646925744249,
+ 1.0864193925233645,
+ 1.1100106577344702,
+ 1.1378624857070259,
+ 1.1246469809666104,
+ 1.1183457857622565,
+ 1.1106462699369437,
+ 1.1692266497045305,
+ 1.1634182459543707,
+ 1.1381242478941034,
+ 1.124828187165592,
+ 1.2107813125583877,
+ 1.1311060608585521,
+ 1.1174656832196275,
+ 1.072309375,
+ 1.0871784658344283,
+ 1.0246341805644643,
+ 1.018875714869281,
+ 1.0411848783557047,
+ 1.0515660750336473,
+ 1.0786580723272534,
+ 1.0953932904654853,
+ 1.09479117873394,
+ 1.1155599829408616,
+ 1.0807172773995462,
+ 1.106163355472373,
+ 1.074041488134476,
+ 1.0305810638978712,
+ 1.1051756007393716,
+ 1.123456369782095,
+ 1.0458708733394713,
+ 1.0765683598982052,
+ 1.0489330013828926,
+ 1.1791706906886905,
+ 1.1495122090853576,
+ 1.1472887081033787,
+ 1.1438163049652499,
+ 1.1586309917902542,
+ 1.1484469307270233,
+ 1.0912836010319573,
+ 1.158123725427922,
+ 1.0427042522291272,
+ 1.066186211768617,
+ 1.1147234490193352,
+ 1.1544007833633905,
+ 1.15770293058561,
+ 1.1166476849342908,
+ 1.1297111490971854,
+ 1.2521119368168487,
+ 1.1402947017638347,
+ 1.1286122366618836,
+ 1.2022766662860498,
+ 1.2589999159946237,
+ 1.2052202238550922,
+ 1.168573067161265,
+ 1.1472135206143896,
+ 1.2121854558803897,
+ 1.2242794336195508,
+ 1.1255944907593411,
+ 1.2019100288722826,
+ 1.1314372467982228,
+ 1.1783541186245252,
+ 1.1706442010051923,
+ 1.1181088572124755,
+ 1.159606243251451,
+ 1.158010188564477,
+ 1.188442394235768,
+ 1.0706618535327153,
+ 1.133058938108437,
+ 1.1871582919406745,
+ 1.2111659510693453,
+ 1.1987495178763081,
+ 1.1507535562459124,
+ 1.0935046987285792,
+ 1.1317952688455455,
+ 1.1771192028264332,
+ 1.218026341495181,
+ 1.1872157426097147,
+ 1.2121090609086036,
+ 1.265248720586235,
+ 1.1568490336293542,
+ 1.1833790453074433,
+ 1.2186744182246045,
+ 1.1841928313753032,
+ 1.196099477939251,
+ 1.201377736044554,
+ 1.154016738126649,
+ 1.2430611105756875,
+ 1.2115980290796902,
+ 1.2294897474198805,
+ 1.2711562702003878,
+ 1.2705662331326895,
+ 1.24730515438247,
+ 1.2767847127804215,
+ 1.306464614216079,
+ 1.2600840423066737,
+ 1.2978799344204748,
+ 1.3363138576509035,
+ 1.3011663186741484,
+ 1.3597593758195647,
+ 1.3342873490861793,
+ 1.2703875620647407,
+ 1.2923588696531039,
+ 1.3083023637611082,
+ 1.3341006976287173,
+ 1.286608418028431,
+ 1.4060154136609557,
+ 1.307338109959893,
+ 1.281186012498338,
+ 1.3520552004315982,
+ 1.4083239984185556,
+ 1.4135109177113983,
+ 1.3119848857252299,
+ 1.3845605032211412,
+ 1.478911559275107,
+ 1.3504099396607958,
+ 1.3589613791423,
+ 1.4248890732963013,
+ 1.3809494128658952,
+ 1.4593982326855355,
+ 1.4200573432343235,
+ 1.448342132265325,
+ 1.5009830406111255,
+ 1.4867572727848903,
+ 1.5095715558687002,
+ 1.5718030090254986,
+ 1.477251709472982,
+ 1.532907196969697,
+ 1.5377669829130935,
+ 1.4794233851084813,
+ 1.567243168271821,
+ 1.609865526073821,
+ 1.4743604891157838,
+ 1.4977858578205296,
+ 1.4138090394295302,
+ 1.4806754411573895,
+ 1.4086343840987294,
+ 1.4552829824619022,
+ 1.3769408577731637,
+ 1.328353439339219,
+ 1.4009221444314792,
+ 1.3206149321410452,
+ 1.4174538947639927,
+ 1.4443309425561064,
+ 1.4029634567126725,
+ 1.4010777360301907,
+ 1.4498867813617697,
+ 1.4481132500327525,
+ 1.4990691489361703,
+ 1.5159556370913942,
+ 1.604844915295101,
+ 1.5387094282554963,
+ 1.508843407036798,
+ 1.5900633183441344,
+ 1.4823726440538139,
+ 1.4928887989576753,
+ 1.5962761671441594,
+ 1.563736681265525,
+ 1.5181906416851152,
+ 1.6071118421052633,
+ 1.5204604578563996,
+ 1.4615907911477166,
+ 1.461617687727746,
+ 1.3894918201726136,
+ 1.4618031959864728,
+ 1.4312333113498905,
+ 1.5073228012008446,
+ 1.4483117200633648,
+ 1.5218941581698502,
+ 1.5594941532258064,
+ 1.5263161277422304,
+ 1.5289586782121483,
+ 1.455535369828389,
+ 1.3785138779268613,
+ 1.4761115844958783,
+ 1.5030175682507583,
+ 1.503847028994929,
+ 1.4746534003586065,
+ 1.5606737345820758,
+ 1.4294906864660675,
+ 1.5303258822852257,
+ 1.4597828888029893,
+ 1.4096778189540307,
+ 1.4756799877543332,
+ 1.4923126224689744,
+ 1.4073292005682552,
+ 1.3282230520329454,
+ 1.4488929866285258,
+ 1.3635227910519563,
+ 1.358456430975362,
+ 1.4068332654723128,
+ 1.3007301512287335,
+ 1.3306116928860248,
+ 1.2865448113207547,
+ 1.2989268432806835,
+ 1.2438071890197082,
+ 1.2814076778720627,
+ 1.2599077618373649,
+ 1.197518023369353,
+ 1.2691174872843163,
+ 1.2470862454170486,
+ 1.2153029092071612,
+ 1.198529729562855,
+ 1.1611844276464671,
+ 1.1819025283789166,
+ 1.20233615915274,
+ 1.2393508219008775,
+ 1.1402066267758864,
+ 1.179647406975981,
+ 1.102640462389672,
+ 1.0841915857318019,
+ 1.053633949173183,
+ 1.0749520692846755,
+ 1.1576478805632138,
+ 1.2051625723692583,
+ 1.1773168138498569,
+ 1.149700361918982,
+ 1.178846611837927,
+ 1.202017395027274,
+ 1.2164815959291126,
+ 1.1787987403722722,
+ 1.182890171078115,
+ 1.188607634417257,
+ 1.202584986707365,
+ 1.1539330174507798,
+ 1.1308595687624008,
+ 1.1462176310741687,
+ 1.1774554992994524,
+ 1.1690097845873786,
+ 1.195963641826923,
+ 1.208462517725925,
+ 1.2120681313451778,
+ 1.2017291931890515,
+ 1.242299688651671,
+ 1.1919668067635987,
+ 1.1265813964474678,
+ 1.2294634679762506,
+ 1.216643338299372,
+ 1.2031350185441287,
+ 1.2611343612334802,
+ 1.253554800838905,
+ 1.2507149568269351,
+ 1.2460493082638515,
+ 1.1919967470340604,
+ 1.1845049062258315,
+ 1.242604916635089,
+ 1.2844249137490416,
+ 1.2366976243111625,
+ 1.2289429322738064,
+ 1.265017099133972,
+ 1.2271258771504334,
+ 1.2575234653875573,
+ 1.2578756354275287,
+ 1.2281593959191555,
+ 1.2555103486437273,
+ 1.2946756168768498,
+ 1.282519161619985,
+ 1.3598832118515713,
+ 1.3011102278340332,
+ 1.3555579333224115,
+ 1.3176587461350167,
+ 1.2476482439356595,
+ 1.2668343115686023,
+ 1.233616960463155,
+ 1.3211400707516967,
+ 1.2722894546332046,
+ 1.3284889761888992,
+ 1.2337813783282554,
+ 1.2208017565620999,
+ 1.237968232615894,
+ 1.2463930854035226,
+ 1.2196328042935924,
+ 1.2109330929487179,
+ 1.2534009042273861,
+ 1.1996318983469998,
+ 1.192708165387592,
+ 1.131473591778327,
+ 1.1654559365539028,
+ 1.116221000973394,
+ 1.1068991762177651,
+ 1.111732987128839,
+ 1.0641317565391057,
+ 1.0984519313233936,
+ 1.0725795924734007,
+ 1.0686059627369515,
+ 1.0226983610614382,
+ 1.0344605405671525,
+ 1.0590198679257528,
+ 1.0827012199006243,
+ 1.0137071647926688,
+ 1.030751312548613,
+ 0.9754911988779711,
+ 0.9963874628696887,
+ 0.9790020502473314,
+ 0.9322393533123028,
+ 0.9440924267100977,
+ 1.006139330489854,
+ 0.9742051329994798,
+ 0.9112583632506528,
+ 1.0250162907978242,
+ 1.0109593937369656,
+ 1.0490472412899883,
+ 1.1303678294380077,
+ 1.137658769830864,
+ 1.106645399869961,
+ 1.1644146156158162,
+ 1.1728524037243475,
+ 1.160199526239067,
+ 1.19645512737618,
+ 1.1403755935345388,
+ 1.172982626443648,
+ 1.133411667269499,
+ 1.1773170895475227,
+ 1.2038579398447606,
+ 1.1854141746411484,
+ 1.1923296376200363,
+ 1.2014295724564927,
+ 1.2243024373953908,
+ 1.1379754371085595,
+ 1.1593861379837067,
+ 1.1317555444854848,
+ 1.144638262074426,
+ 1.1967253901610135,
+ 1.273919502807901,
+ 1.1925903940766323,
+ 1.1981184131642055
+ ],
+ "target_mean_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "td_error_abs": [
+ 0.25948769644163316,
+ 0.15221050423635563,
+ 0.13752241464948858,
+ 0.1831723179768041,
+ 0.2002368841654719,
+ 0.1953959253595342,
+ 0.18948279788769185,
+ 0.2092217933968322,
+ 0.2364792236262301,
+ 0.2039067427123544,
+ 0.22757921825225913,
+ 0.2757921142743286,
+ 0.2538487783466076,
+ 0.22840394784895762,
+ 0.2424137813355718,
+ 0.2478074773597153,
+ 0.25650274438570575,
+ 0.24557326703155577,
+ 0.24182818929836034,
+ 0.2690107488307823,
+ 0.2324597026658504,
+ 0.27998036714380936,
+ 0.23746478122057438,
+ 0.2529201702593782,
+ 0.23005391905737704,
+ 0.21737199259477696,
+ 0.21556200090694197,
+ 0.2588806770734391,
+ 0.21948966160407019,
+ 0.24115850915910045,
+ 0.2912858128313588,
+ 0.2707221982897449,
+ 0.2252022460154789,
+ 0.27883978131218706,
+ 0.25000181482888684,
+ 0.2481782240070478,
+ 0.28337495325768786,
+ 0.2624364963247756,
+ 0.22806865536591844,
+ 0.2582025428931387,
+ 0.22259580579983088,
+ 0.28794859626581965,
+ 0.24410493092393906,
+ 0.260732853645156,
+ 0.2618891479072488,
+ 0.2791628930645004,
+ 0.24729574325279435,
+ 0.26309374377985667,
+ 0.4049092438597918,
+ 0.28948375722448444,
+ 0.3057898281488489,
+ 0.27899751931409417,
+ 0.279211846822116,
+ 0.2689273499536254,
+ 0.24763760005982224,
+ 0.3176435402416874,
+ 0.26432666092539875,
+ 0.3683653632965919,
+ 0.3180154373277107,
+ 0.3900065487583432,
+ 0.2576836198491016,
+ 0.2593623519405242,
+ 0.2744594261561989,
+ 0.3375756477102613,
+ 0.32002661562958823,
+ 0.3280396414866054,
+ 0.3565469995322091,
+ 0.42900607470791374,
+ 0.37513824792316947,
+ 0.34297845659838816,
+ 0.2671987906857406,
+ 0.37110770089285716,
+ 0.2848390864852862,
+ 0.26350058893683304,
+ 0.26006713371638196,
+ 0.33575186837658544,
+ 0.37782613671927867,
+ 0.2673154978024552,
+ 0.2838080050466091,
+ 0.32160183795752184,
+ 0.31615571004111026,
+ 0.38473414159963887,
+ 0.2786222903920447,
+ 0.2803774722869869,
+ 0.2747858502538071,
+ 0.32928995447834647,
+ 0.26071536815924545,
+ 0.34075846055210246,
+ 0.250946484375,
+ 0.24076554928753285,
+ 0.28765547326010266,
+ 0.2642344675500409,
+ 0.2823502385696309,
+ 0.29128056522333445,
+ 0.3221932701828984,
+ 0.25913173753223356,
+ 0.26919495217039446,
+ 0.2644324870488527,
+ 0.2923456063943399,
+ 0.3401256423289055,
+ 0.3103718404952208,
+ 0.24596329167087713,
+ 0.37073814406192235,
+ 0.2490980449264899,
+ 0.35219354716066026,
+ 0.2502100014859028,
+ 0.2620143735981348,
+ 0.2623121740021813,
+ 0.3772570997909316,
+ 0.30263033014621754,
+ 0.3471198062850842,
+ 0.25396980673579844,
+ 0.3303749785665295,
+ 0.2684304553563998,
+ 0.33332658611092536,
+ 0.2983621732829809,
+ 0.3132992034262799,
+ 0.31369910437169635,
+ 0.3269407151210067,
+ 0.39408196389175343,
+ 0.29891266956543827,
+ 0.27584480694868563,
+ 0.35952997169504797,
+ 0.3130639198090582,
+ 0.2742123423436277,
+ 0.3219068214349798,
+ 0.27539315172421036,
+ 0.33234234156057757,
+ 0.27296828478865676,
+ 0.32276489290706684,
+ 0.31218940593295286,
+ 0.2911271814625991,
+ 0.2597966044083551,
+ 0.34479827880859376,
+ 0.29313379697505554,
+ 0.2771069448536269,
+ 0.30637187853647985,
+ 0.35190940268422793,
+ 0.2838039699394318,
+ 0.26906136674924813,
+ 0.33940801578340263,
+ 0.25475591166579603,
+ 0.26389353593792253,
+ 0.3718984200101441,
+ 0.3137034249939242,
+ 0.34411748830326,
+ 0.3519444707018476,
+ 0.27888619510952184,
+ 0.2933637893507841,
+ 0.2964550107401141,
+ 0.3719506905891834,
+ 0.3542715739048867,
+ 0.35597260494840527,
+ 0.2901688369236262,
+ 0.2839235465716646,
+ 0.29679567404935275,
+ 0.32956028770100665,
+ 0.3552661048636828,
+ 0.30808836803850814,
+ 0.3317507776073371,
+ 0.26285753073981694,
+ 0.33044672024583976,
+ 0.29941265861928906,
+ 0.3297881002597094,
+ 0.29439704367323855,
+ 0.3811870705657164,
+ 0.3039187650439907,
+ 0.28686091917700546,
+ 0.3153119861570179,
+ 0.3275088617357157,
+ 0.29670197616160326,
+ 0.31971277072920673,
+ 0.2794043695150398,
+ 0.29031284831497506,
+ 0.28416597575998864,
+ 0.32712208391435066,
+ 0.29381827532032473,
+ 0.30576915910464586,
+ 0.3014459598740665,
+ 0.28837321709553937,
+ 0.2824656712429956,
+ 0.3243627538018048,
+ 0.3095953076178367,
+ 0.3310334856460895,
+ 0.36957855305045795,
+ 0.3042868245390655,
+ 0.4540597294153792,
+ 0.3566506782268932,
+ 0.37932764298664423,
+ 0.3396404109589041,
+ 0.3171138612227908,
+ 0.3158915403040746,
+ 0.33431812803139893,
+ 0.35428781883803967,
+ 0.3087744624071782,
+ 0.3445667543112009,
+ 0.3560228644167048,
+ 0.37562673798168333,
+ 0.36768050864141244,
+ 0.42304497660786416,
+ 0.34589048949299533,
+ 0.3773154404247117,
+ 0.3666715330218866,
+ 0.3772200264012163,
+ 0.38171787919669314,
+ 0.41701848400761854,
+ 0.295620435073797,
+ 0.37500875032942416,
+ 0.3092396903838087,
+ 0.429559743526583,
+ 0.32369392520223994,
+ 0.3198015751239819,
+ 0.330559816417402,
+ 0.3382787158490503,
+ 0.332731195759189,
+ 0.33993918904188863,
+ 0.29626729324179457,
+ 0.35835403091972207,
+ 0.3474835136155113,
+ 0.39823073047702595,
+ 0.3044290000641641,
+ 0.3361390047736801,
+ 0.35821819218280904,
+ 0.3007263305745497,
+ 0.303066006935515,
+ 0.32561194274830885,
+ 0.3282513415913493,
+ 0.35539167505331637,
+ 0.35775063906031856,
+ 0.41926641217229943,
+ 0.3012885754268948,
+ 0.2909069123619101,
+ 0.3354373280619221,
+ 0.3140431936163651,
+ 0.3451098899563443,
+ 0.3337824121446866,
+ 0.33774434396961217,
+ 0.3585709052557001,
+ 0.2956750158024662,
+ 0.30416871295320747,
+ 0.33793791959455,
+ 0.32452316017797106,
+ 0.3061784341523619,
+ 0.3144303490423387,
+ 0.29735668199309545,
+ 0.35198282105564754,
+ 0.31133538718488096,
+ 0.3131472520594909,
+ 0.34169219022471464,
+ 0.3051224912020507,
+ 0.28680787354944093,
+ 0.3066550708207928,
+ 0.3536096804881249,
+ 0.3505851424608408,
+ 0.37572089096007105,
+ 0.3425324740690633,
+ 0.2836601445390573,
+ 0.33841692865329065,
+ 0.36345978680192687,
+ 0.2960043799641612,
+ 0.2821127429851941,
+ 0.3182449381423796,
+ 0.2895468035900417,
+ 0.32951356918338837,
+ 0.30703493994299674,
+ 0.33433441758230936,
+ 0.3052197105209123,
+ 0.2942869187642323,
+ 0.2825801934311885,
+ 0.31492431578137,
+ 0.2909945256405026,
+ 0.30919957811293747,
+ 0.2805804170802955,
+ 0.34839480211217166,
+ 0.2794464219220571,
+ 0.3202046660206202,
+ 0.3127320813202334,
+ 0.3050557060519864,
+ 0.30337979827734995,
+ 0.286588326592661,
+ 0.288936431586794,
+ 0.27535475229585304,
+ 0.2718273062204686,
+ 0.30558330408748846,
+ 0.2644541459287881,
+ 0.2574367281137835,
+ 0.2743500817094738,
+ 0.26735243034187506,
+ 0.31862007332357806,
+ 0.3198578254090229,
+ 0.3132890034142917,
+ 0.27919044916169183,
+ 0.27733699842620196,
+ 0.2695876528488988,
+ 0.3056522785622593,
+ 0.26344526080334085,
+ 0.2620858570106246,
+ 0.30259776907329,
+ 0.30731886276288034,
+ 0.2712930951799665,
+ 0.2980494350423593,
+ 0.3114659395996529,
+ 0.2751055031816875,
+ 0.2840313251201923,
+ 0.2818085932464226,
+ 0.3127155749325825,
+ 0.2846950713120624,
+ 0.25496233117097156,
+ 0.3616840194668473,
+ 0.25482056248425294,
+ 0.2687359733253853,
+ 0.2783196867390747,
+ 0.2714783282684027,
+ 0.30141822810140023,
+ 0.24799638066990193,
+ 0.27757063183314695,
+ 0.2826665803397324,
+ 0.2756984558377982,
+ 0.30861436254310065,
+ 0.26989029121471203,
+ 0.35776047739506134,
+ 0.318753760823161,
+ 0.261283566598935,
+ 0.26456255100011483,
+ 0.30393818312637433,
+ 0.2898091146497323,
+ 0.3517926388164755,
+ 0.2715296642173822,
+ 0.29358052722356526,
+ 0.27459687736254984,
+ 0.29205902892773783,
+ 0.3200503330887921,
+ 0.2663446993084712,
+ 0.3165647334227556,
+ 0.28273881117543803,
+ 0.29466902524646515,
+ 0.32744040886152687,
+ 0.29711166563959523,
+ 0.283189931589352,
+ 0.2878643256817085,
+ 0.27541575379396277,
+ 0.25736880052925054,
+ 0.2609314042993758,
+ 0.29106717685382705,
+ 0.30664559894806453,
+ 0.29062915449382876,
+ 0.26966102013221155,
+ 0.2805824835688263,
+ 0.3198455091598334,
+ 0.32415551842835033,
+ 0.2760946214997723,
+ 0.31756017357797484,
+ 0.28724080852530826,
+ 0.30966173977279804,
+ 0.25999492491159043,
+ 0.2556938148776736,
+ 0.24905023852915953,
+ 0.29716182434651256,
+ 0.23921121593823033,
+ 0.29622983134619685,
+ 0.25405771206163025,
+ 0.2485318763233791,
+ 0.30799063027535356,
+ 0.25776150117192576,
+ 0.259602553367862,
+ 0.30185140352169365,
+ 0.2848775204216712,
+ 0.24900462322637335,
+ 0.26733044164037856,
+ 0.28826140065146577,
+ 0.25006168992140043,
+ 0.24784004775095927,
+ 0.2531303226480907,
+ 0.27631665820084833,
+ 0.24346298520085213,
+ 0.2638692807663353,
+ 0.26262147532336194,
+ 0.30327252790251735,
+ 0.26579269190913524,
+ 0.29662916350103524,
+ 0.32675553624588294,
+ 0.2671380021544941,
+ 0.26941629900871267,
+ 0.2502179702236731,
+ 0.278547567785079,
+ 0.2516139355455906,
+ 0.30970805817113245,
+ 0.25618915434488193,
+ 0.25182481936182594,
+ 0.26790603253552425,
+ 0.2921287689821787,
+ 0.26242160907626016,
+ 0.27759709686725276,
+ 0.2828838361337407,
+ 0.2766299329315708,
+ 0.2807958043637833,
+ 0.27403130593613895,
+ 0.2700475194858637,
+ 0.26357137738966996,
+ 0.25755646510751923
+ ],
+ "td_error_abs_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0625,
+ 0.09375,
+ 0.15625,
+ 0.125,
+ 0.1875,
+ 0.125,
+ 0.09375,
+ 0.0,
+ 0.03125,
+ 0.0,
+ 0.09375,
+ 0.1875,
+ 0.0625,
+ 0.0625,
+ 0.125,
+ 0.03125,
+ 0.09375,
+ 0.0625,
+ 0.1875,
+ 0.0625,
+ 0.15625,
+ 0.03125,
+ 0.0625,
+ 0.1875,
+ 0.09375,
+ 0.15625,
+ 0.125,
+ 0.09375,
+ 0.03125,
+ 0.1875,
+ 0.1875,
+ 0.09375,
+ 0.1875,
+ 0.15625,
+ 0.125,
+ 0.15625,
+ 0.09375,
+ 0.125,
+ 0.15625,
+ 0.25,
+ 0.15625,
+ 0.21875,
+ 0.15625,
+ 0.21875,
+ 0.09375,
+ 0.03125,
+ 0.25,
+ 0.15625,
+ 0.125,
+ 0.15625,
+ 0.0625,
+ 0.1875,
+ 0.15625,
+ 0.28125,
+ 0.09375,
+ 0.15625,
+ 0.28125,
+ 0.25,
+ 0.09375,
+ 0.125,
+ 0.125,
+ 0.3125,
+ 0.1875,
+ 0.21875,
+ 0.15625,
+ 0.1875,
+ 0.09375,
+ 0.25,
+ 0.21875,
+ 0.1875,
+ 0.1875,
+ 0.21875,
+ 0.15625,
+ 0.21875,
+ 0.15625,
+ 0.1875,
+ 0.1875,
+ 0.1875,
+ 0.125,
+ 0.21875,
+ 0.1875,
+ 0.3125,
+ 0.3125,
+ 0.1875,
+ 0.125,
+ 0.15625,
+ 0.1875,
+ 0.1875,
+ 0.125,
+ 0.25,
+ 0.15625,
+ 0.21875,
+ 0.3125,
+ 0.125,
+ 0.1875,
+ 0.1875,
+ 0.15625,
+ 0.09375,
+ 0.1875,
+ 0.0625,
+ 0.1875,
+ 0.28125,
+ 0.0625,
+ 0.125,
+ 0.09375,
+ 0.1875,
+ 0.1875,
+ 0.125,
+ 0.15625,
+ 0.15625,
+ 0.09375,
+ 0.15625,
+ 0.125,
+ 0.375,
+ 0.09375,
+ 0.125,
+ 0.125,
+ 0.21875,
+ 0.1875,
+ 0.125,
+ 0.09375,
+ 0.21875,
+ 0.15625,
+ 0.25,
+ 0.28125,
+ 0.15625,
+ 0.34375,
+ 0.125,
+ 0.25,
+ 0.3125,
+ 0.0625,
+ 0.1875,
+ 0.1875,
+ 0.125,
+ 0.28125,
+ 0.3125,
+ 0.21875,
+ 0.28125,
+ 0.1875,
+ 0.21875,
+ 0.21875,
+ 0.1875,
+ 0.21875,
+ 0.28125,
+ 0.15625,
+ 0.28125,
+ 0.25,
+ 0.0625,
+ 0.15625,
+ 0.21875,
+ 0.1875,
+ 0.125,
+ 0.3125,
+ 0.1875,
+ 0.34375,
+ 0.09375,
+ 0.125,
+ 0.21875,
+ 0.3125,
+ 0.15625,
+ 0.1875,
+ 0.1875,
+ 0.21875,
+ 0.3125,
+ 0.1875,
+ 0.1875,
+ 0.15625,
+ 0.21875,
+ 0.1875,
+ 0.28125,
+ 0.375,
+ 0.25,
+ 0.3125,
+ 0.15625,
+ 0.15625,
+ 0.125,
+ 0.28125,
+ 0.15625,
+ 0.25,
+ 0.1875,
+ 0.09375,
+ 0.25,
+ 0.21875,
+ 0.28125,
+ 0.21875,
+ 0.1875,
+ 0.3125,
+ 0.25,
+ 0.15625,
+ 0.1875,
+ 0.1875,
+ 0.28125,
+ 0.28125,
+ 0.125,
+ 0.21875,
+ 0.375,
+ 0.09375,
+ 0.25,
+ 0.1875,
+ 0.0625,
+ 0.21875,
+ 0.09375,
+ 0.46875,
+ 0.09375,
+ 0.25,
+ 0.125,
+ 0.125,
+ 0.125,
+ 0.34375,
+ 0.125,
+ 0.25,
+ 0.34375,
+ 0.3125,
+ 0.21875,
+ 0.1875,
+ 0.28125,
+ 0.25,
+ 0.25,
+ 0.21875,
+ 0.25,
+ 0.21875,
+ 0.3125,
+ 0.28125,
+ 0.15625,
+ 0.21875,
+ 0.1875,
+ 0.1875,
+ 0.125,
+ 0.21875,
+ 0.375,
+ 0.3125,
+ 0.28125,
+ 0.21875,
+ 0.125,
+ 0.1875,
+ 0.21875,
+ 0.25,
+ 0.1875,
+ 0.34375,
+ 0.21875,
+ 0.40625,
+ 0.21875,
+ 0.15625,
+ 0.15625,
+ 0.1875,
+ 0.21875,
+ 0.28125,
+ 0.1875,
+ 0.09375,
+ 0.25,
+ 0.1875,
+ 0.40625,
+ 0.28125,
+ 0.3125,
+ 0.09375,
+ 0.28125,
+ 0.1875,
+ 0.21875,
+ 0.28125,
+ 0.25,
+ 0.125,
+ 0.3125,
+ 0.25,
+ 0.34375,
+ 0.15625,
+ 0.3125,
+ 0.21875,
+ 0.3125,
+ 0.15625,
+ 0.1875,
+ 0.28125,
+ 0.21875,
+ 0.25,
+ 0.1875,
+ 0.09375,
+ 0.0625,
+ 0.25,
+ 0.21875,
+ 0.3125,
+ 0.25,
+ 0.15625,
+ 0.15625,
+ 0.21875,
+ 0.25,
+ 0.09375,
+ 0.1875,
+ 0.375,
+ 0.1875,
+ 0.25,
+ 0.15625,
+ 0.25,
+ 0.34375,
+ 0.25,
+ 0.15625,
+ 0.25,
+ 0.25,
+ 0.1875,
+ 0.1875,
+ 0.25,
+ 0.15625,
+ 0.1875,
+ 0.21875,
+ 0.15625,
+ 0.15625,
+ 0.1875,
+ 0.09375,
+ 0.25,
+ 0.25,
+ 0.15625,
+ 0.25,
+ 0.125,
+ 0.25,
+ 0.21875,
+ 0.25,
+ 0.21875,
+ 0.15625,
+ 0.09375,
+ 0.28125,
+ 0.25,
+ 0.21875,
+ 0.15625,
+ 0.25,
+ 0.125,
+ 0.3125,
+ 0.15625,
+ 0.09375,
+ 0.25,
+ 0.28125,
+ 0.21875,
+ 0.25,
+ 0.21875,
+ 0.34375,
+ 0.25,
+ 0.21875,
+ 0.21875,
+ 0.375,
+ 0.21875,
+ 0.125,
+ 0.28125,
+ 0.28125,
+ 0.25,
+ 0.25,
+ 0.09375,
+ 0.21875,
+ 0.28125,
+ 0.28125,
+ 0.375,
+ 0.15625,
+ 0.1875,
+ 0.15625,
+ 0.3125,
+ 0.375,
+ 0.28125,
+ 0.34375,
+ 0.15625,
+ 0.28125,
+ 0.21875,
+ 0.25,
+ 0.25,
+ 0.25,
+ 0.5625,
+ 0.1875,
+ 0.3125,
+ 0.28125,
+ 0.28125,
+ 0.1875,
+ 0.34375,
+ 0.3125,
+ 0.25,
+ 0.25,
+ 0.3125,
+ 0.28125,
+ 0.3125,
+ 0.25,
+ 0.09375,
+ 0.34375,
+ 0.34375,
+ 0.28125,
+ 0.3125,
+ 0.34375,
+ 0.15625,
+ 0.1875,
+ 0.3125,
+ 0.375,
+ 0.28125,
+ 0.1875,
+ 0.1875,
+ 0.34375,
+ 0.21875,
+ 0.375,
+ 0.21875,
+ 0.28125,
+ 0.40625,
+ 0.4375,
+ 0.28125,
+ 0.25,
+ 0.375,
+ 0.21875
+ ],
+ "test_battle_won_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_dead_allies_mean": [
+ 5.0,
+ 5.0,
+ 5.0,
+ 4.875,
+ 4.78125,
+ 4.65625,
+ 4.71875,
+ 4.59375,
+ 4.6875,
+ 4.78125,
+ 5.0,
+ 4.875,
+ 5.0,
+ 4.6875,
+ 4.5625,
+ 4.90625,
+ 4.90625,
+ 4.71875,
+ 4.9375,
+ 4.78125,
+ 4.84375,
+ 4.59375,
+ 4.8125,
+ 4.5625,
+ 4.96875,
+ 4.84375,
+ 4.46875,
+ 4.71875,
+ 4.5625,
+ 4.75,
+ 4.8125,
+ 4.90625,
+ 4.59375,
+ 4.625,
+ 4.8125,
+ 4.5625,
+ 4.59375,
+ 4.65625,
+ 4.65625,
+ 4.6875,
+ 4.71875,
+ 4.6875,
+ 4.53125,
+ 4.59375,
+ 4.4375,
+ 4.65625,
+ 4.625,
+ 4.71875,
+ 4.9375,
+ 4.46875,
+ 4.625,
+ 4.6875,
+ 4.53125,
+ 4.90625,
+ 4.59375,
+ 4.65625,
+ 4.34375,
+ 4.8125,
+ 4.6875,
+ 4.28125,
+ 4.4375,
+ 4.6875,
+ 4.71875,
+ 4.71875,
+ 4.21875,
+ 4.34375,
+ 4.5,
+ 4.53125,
+ 4.59375,
+ 4.78125,
+ 4.28125,
+ 4.5,
+ 4.5625,
+ 4.5625,
+ 4.53125,
+ 4.625,
+ 4.4375,
+ 4.625,
+ 4.53125,
+ 4.75,
+ 4.5,
+ 4.625,
+ 4.46875,
+ 4.625,
+ 4.3125,
+ 4.28125,
+ 4.46875,
+ 4.78125,
+ 4.6875,
+ 4.625,
+ 4.5625,
+ 4.75,
+ 4.4375,
+ 4.6875,
+ 4.46875,
+ 4.25,
+ 4.65625,
+ 4.65625,
+ 4.4375,
+ 4.75,
+ 4.8125,
+ 4.59375,
+ 4.8125,
+ 4.46875,
+ 4.375,
+ 4.8125,
+ 4.6875,
+ 4.78125,
+ 4.53125,
+ 4.65625,
+ 4.75,
+ 4.6875,
+ 4.65625,
+ 4.78125,
+ 4.6875,
+ 4.65625,
+ 4.0625,
+ 4.84375,
+ 4.8125,
+ 4.65625,
+ 4.59375,
+ 4.625,
+ 4.75,
+ 4.8125,
+ 4.5,
+ 4.71875,
+ 4.40625,
+ 4.3125,
+ 4.65625,
+ 4.1875,
+ 4.59375,
+ 4.46875,
+ 4.46875,
+ 4.90625,
+ 4.65625,
+ 4.625,
+ 4.625,
+ 4.40625,
+ 4.375,
+ 4.53125,
+ 4.3125,
+ 4.46875,
+ 4.5,
+ 4.5625,
+ 4.5625,
+ 4.59375,
+ 4.34375,
+ 4.5625,
+ 4.40625,
+ 4.40625,
+ 4.875,
+ 4.46875,
+ 4.40625,
+ 4.5625,
+ 4.75,
+ 4.25,
+ 4.53125,
+ 4.28125,
+ 4.6875,
+ 4.8125,
+ 4.375,
+ 4.25,
+ 4.625,
+ 4.5625,
+ 4.5,
+ 4.5625,
+ 4.25,
+ 4.53125,
+ 4.625,
+ 4.71875,
+ 4.5,
+ 4.53125,
+ 4.375,
+ 3.875,
+ 4.4375,
+ 4.28125,
+ 4.71875,
+ 4.6875,
+ 4.71875,
+ 4.46875,
+ 4.625,
+ 4.46875,
+ 4.5625,
+ 4.84375,
+ 4.3125,
+ 4.625,
+ 4.46875,
+ 4.5,
+ 4.53125,
+ 4.34375,
+ 4.375,
+ 4.625,
+ 4.65625,
+ 4.53125,
+ 4.3125,
+ 4.40625,
+ 4.71875,
+ 4.46875,
+ 4.28125,
+ 4.75,
+ 4.40625,
+ 4.5,
+ 4.8125,
+ 4.65625,
+ 4.75,
+ 4.25,
+ 4.78125,
+ 4.5625,
+ 4.75,
+ 4.71875,
+ 4.6875,
+ 4.0625,
+ 4.71875,
+ 4.375,
+ 4.15625,
+ 4.125,
+ 4.4375,
+ 4.34375,
+ 4.21875,
+ 4.34375,
+ 4.4375,
+ 4.4375,
+ 4.53125,
+ 4.5,
+ 4.28125,
+ 4.46875,
+ 4.625,
+ 4.59375,
+ 4.5625,
+ 4.6875,
+ 4.65625,
+ 4.53125,
+ 4.09375,
+ 4.34375,
+ 4.46875,
+ 4.4375,
+ 4.5625,
+ 4.46875,
+ 4.46875,
+ 4.4375,
+ 4.65625,
+ 4.125,
+ 4.5625,
+ 3.96875,
+ 4.34375,
+ 4.65625,
+ 4.6875,
+ 4.5,
+ 4.5,
+ 4.40625,
+ 4.59375,
+ 4.84375,
+ 4.34375,
+ 4.5625,
+ 3.8125,
+ 4.1875,
+ 4.28125,
+ 4.71875,
+ 4.40625,
+ 4.34375,
+ 4.40625,
+ 4.21875,
+ 4.34375,
+ 4.6875,
+ 3.9375,
+ 4.28125,
+ 4.03125,
+ 4.625,
+ 4.15625,
+ 4.375,
+ 4.375,
+ 4.5625,
+ 4.53125,
+ 4.40625,
+ 4.5625,
+ 4.375,
+ 4.65625,
+ 4.8125,
+ 4.84375,
+ 4.375,
+ 4.25,
+ 4.28125,
+ 4.28125,
+ 4.65625,
+ 4.6875,
+ 4.53125,
+ 4.3125,
+ 4.84375,
+ 4.59375,
+ 4.125,
+ 4.53125,
+ 4.375,
+ 4.65625,
+ 4.40625,
+ 4.34375,
+ 4.28125,
+ 4.53125,
+ 4.5,
+ 4.28125,
+ 4.5,
+ 4.65625,
+ 4.4375,
+ 4.5,
+ 4.53125,
+ 4.40625,
+ 4.65625,
+ 4.71875,
+ 4.5,
+ 4.6875,
+ 4.25,
+ 4.25,
+ 4.65625,
+ 4.375,
+ 4.6875,
+ 4.5625,
+ 4.46875,
+ 4.4375,
+ 4.4375,
+ 4.75,
+ 4.78125,
+ 4.3125,
+ 4.375,
+ 4.46875,
+ 4.4375,
+ 4.375,
+ 4.65625,
+ 4.4375,
+ 4.59375,
+ 4.78125,
+ 4.4375,
+ 4.28125,
+ 4.53125,
+ 4.5,
+ 4.5,
+ 4.125,
+ 4.40625,
+ 4.53125,
+ 4.53125,
+ 3.9375,
+ 4.28125,
+ 4.75,
+ 4.3125,
+ 4.5,
+ 4.40625,
+ 4.0625,
+ 4.78125,
+ 4.5625,
+ 4.4375,
+ 4.34375,
+ 4.03125,
+ 4.59375,
+ 4.59375,
+ 4.75,
+ 4.25,
+ 4.0,
+ 4.125,
+ 4.25,
+ 4.6875,
+ 4.34375,
+ 4.375,
+ 4.28125,
+ 4.5,
+ 4.53125,
+ 3.53125,
+ 4.5,
+ 4.25,
+ 4.15625,
+ 4.09375,
+ 4.53125,
+ 4.15625,
+ 4.4375,
+ 4.3125,
+ 4.53125,
+ 4.21875,
+ 4.5,
+ 4.21875,
+ 4.375,
+ 4.75,
+ 4.125,
+ 4.15625,
+ 4.375,
+ 4.28125,
+ 4.1875,
+ 4.625,
+ 4.59375,
+ 4.03125,
+ 4.125,
+ 4.5,
+ 4.6875,
+ 4.5,
+ 4.1875,
+ 4.53125,
+ 4.09375,
+ 4.46875,
+ 4.4375,
+ 4.1875,
+ 3.78125,
+ 4.3125,
+ 4.4375,
+ 4.28125,
+ 4.5625
+ ],
+ "test_dead_allies_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_dead_enemies_mean": [
+ 0.0,
+ 0.0,
+ 2.25,
+ 2.15625,
+ 2.125,
+ 2.28125,
+ 2.4375,
+ 2.34375,
+ 2.5,
+ 1.71875,
+ 1.9375,
+ 1.78125,
+ 1.3125,
+ 1.59375,
+ 1.84375,
+ 1.8125,
+ 1.78125,
+ 2.46875,
+ 1.6875,
+ 2.0625,
+ 1.8125,
+ 2.71875,
+ 2.375,
+ 2.59375,
+ 2.15625,
+ 2.40625,
+ 2.53125,
+ 2.125,
+ 2.4375,
+ 2.5625,
+ 2.3125,
+ 2.15625,
+ 2.75,
+ 2.3125,
+ 1.90625,
+ 2.5,
+ 2.5,
+ 2.125,
+ 2.3125,
+ 2.1875,
+ 2.6875,
+ 2.65625,
+ 2.96875,
+ 2.375,
+ 2.65625,
+ 2.8125,
+ 2.3125,
+ 2.5625,
+ 2.34375,
+ 2.8125,
+ 2.46875,
+ 2.8125,
+ 2.90625,
+ 2.09375,
+ 2.8125,
+ 2.125,
+ 2.9375,
+ 2.34375,
+ 2.375,
+ 3.09375,
+ 2.5,
+ 2.375,
+ 2.6875,
+ 2.46875,
+ 3.0,
+ 2.6875,
+ 2.71875,
+ 2.3125,
+ 2.40625,
+ 2.46875,
+ 2.84375,
+ 2.78125,
+ 2.71875,
+ 2.65625,
+ 2.90625,
+ 2.625,
+ 2.4375,
+ 2.5,
+ 2.59375,
+ 2.40625,
+ 2.5625,
+ 2.09375,
+ 2.6875,
+ 2.65625,
+ 2.96875,
+ 3.1875,
+ 2.875,
+ 2.1875,
+ 2.125,
+ 2.75,
+ 2.75,
+ 2.5,
+ 2.90625,
+ 2.5,
+ 2.5625,
+ 2.78125,
+ 2.625,
+ 2.3125,
+ 2.8125,
+ 2.5,
+ 2.03125,
+ 2.875,
+ 2.03125,
+ 2.34375,
+ 2.75,
+ 2.125,
+ 2.5625,
+ 2.28125,
+ 2.78125,
+ 2.9375,
+ 2.125,
+ 2.4375,
+ 2.59375,
+ 2.5625,
+ 2.71875,
+ 2.3125,
+ 3.125,
+ 2.75,
+ 2.375,
+ 2.4375,
+ 3.125,
+ 3.0,
+ 2.78125,
+ 2.46875,
+ 2.9375,
+ 2.71875,
+ 3.03125,
+ 3.28125,
+ 2.59375,
+ 3.0,
+ 2.375,
+ 3.0625,
+ 3.03125,
+ 2.59375,
+ 2.78125,
+ 2.84375,
+ 2.4375,
+ 3.03125,
+ 2.875,
+ 2.71875,
+ 3.28125,
+ 3.0625,
+ 2.75,
+ 2.96875,
+ 2.625,
+ 3.03125,
+ 3.03125,
+ 2.625,
+ 2.875,
+ 3.25,
+ 2.65625,
+ 2.4375,
+ 2.65625,
+ 2.5625,
+ 2.5,
+ 3.1875,
+ 2.90625,
+ 3.25,
+ 2.75,
+ 2.59375,
+ 2.6875,
+ 3.21875,
+ 2.4375,
+ 2.5625,
+ 2.75,
+ 2.8125,
+ 3.71875,
+ 3.1875,
+ 2.9375,
+ 2.71875,
+ 2.9375,
+ 2.59375,
+ 3.125,
+ 3.3125,
+ 3.375,
+ 3.03125,
+ 2.5625,
+ 2.875,
+ 2.71875,
+ 3.0625,
+ 2.84375,
+ 3.0625,
+ 2.71875,
+ 2.5,
+ 3.28125,
+ 2.875,
+ 2.90625,
+ 3.09375,
+ 2.84375,
+ 3.25,
+ 3.0625,
+ 2.71875,
+ 2.8125,
+ 2.9375,
+ 3.0625,
+ 3.0,
+ 2.71875,
+ 2.65625,
+ 3.65625,
+ 2.875,
+ 3.0625,
+ 2.84375,
+ 2.53125,
+ 3.03125,
+ 2.78125,
+ 3.53125,
+ 2.78125,
+ 3.03125,
+ 2.53125,
+ 2.6875,
+ 2.75,
+ 3.28125,
+ 2.6875,
+ 3.09375,
+ 3.21875,
+ 3.15625,
+ 3.21875,
+ 3.125,
+ 2.96875,
+ 3.28125,
+ 3.15625,
+ 2.6875,
+ 3.1875,
+ 2.84375,
+ 3.3125,
+ 3.28125,
+ 3.1875,
+ 3.1875,
+ 2.84375,
+ 2.84375,
+ 2.5,
+ 3.03125,
+ 3.15625,
+ 3.40625,
+ 3.03125,
+ 3.0,
+ 2.78125,
+ 2.96875,
+ 3.125,
+ 3.34375,
+ 2.84375,
+ 3.09375,
+ 2.78125,
+ 3.59375,
+ 3.03125,
+ 2.53125,
+ 2.6875,
+ 3.15625,
+ 2.96875,
+ 3.0625,
+ 2.5,
+ 2.625,
+ 3.46875,
+ 3.0,
+ 3.6875,
+ 3.34375,
+ 3.28125,
+ 2.5625,
+ 3.25,
+ 2.9375,
+ 3.15625,
+ 3.21875,
+ 3.125,
+ 2.75,
+ 3.15625,
+ 3.25,
+ 3.5,
+ 2.90625,
+ 3.34375,
+ 2.96875,
+ 3.375,
+ 2.75,
+ 2.78125,
+ 3.125,
+ 3.125,
+ 3.125,
+ 3.03125,
+ 2.78125,
+ 2.375,
+ 3.25,
+ 3.03125,
+ 3.09375,
+ 3.0625,
+ 2.75,
+ 2.71875,
+ 2.84375,
+ 2.9375,
+ 2.90625,
+ 2.96875,
+ 3.53125,
+ 2.875,
+ 3.25,
+ 2.8125,
+ 3.15625,
+ 3.5625,
+ 2.8125,
+ 2.75,
+ 3.53125,
+ 3.0625,
+ 2.84375,
+ 3.125,
+ 3.34375,
+ 3.09375,
+ 3.125,
+ 3.1875,
+ 3.03125,
+ 2.96875,
+ 2.8125,
+ 2.375,
+ 3.3125,
+ 3.15625,
+ 3.125,
+ 3.40625,
+ 2.40625,
+ 3.34375,
+ 3.03125,
+ 2.875,
+ 2.96875,
+ 2.875,
+ 2.625,
+ 3.34375,
+ 3.25,
+ 2.84375,
+ 2.8125,
+ 3.1875,
+ 2.96875,
+ 2.84375,
+ 3.25,
+ 2.6875,
+ 3.15625,
+ 2.9375,
+ 3.125,
+ 2.75,
+ 3.0625,
+ 3.5625,
+ 3.1875,
+ 2.75,
+ 3.3125,
+ 3.21875,
+ 3.28125,
+ 2.625,
+ 3.4375,
+ 3.1875,
+ 3.09375,
+ 3.15625,
+ 2.625,
+ 3.03125,
+ 3.21875,
+ 3.0625,
+ 3.75,
+ 2.90625,
+ 3.28125,
+ 2.9375,
+ 2.96875,
+ 3.65625,
+ 3.25,
+ 3.21875,
+ 2.96875,
+ 2.9375,
+ 2.8125,
+ 3.34375,
+ 3.03125,
+ 3.03125,
+ 4.03125,
+ 2.84375,
+ 3.4375,
+ 3.34375,
+ 2.96875,
+ 2.84375,
+ 3.375,
+ 3.3125,
+ 3.0625,
+ 2.96875,
+ 3.46875,
+ 2.8125,
+ 3.125,
+ 3.1875,
+ 2.6875,
+ 3.40625,
+ 3.53125,
+ 3.0625,
+ 3.53125,
+ 3.1875,
+ 3.03125,
+ 3.1875,
+ 3.53125,
+ 3.59375,
+ 3.0625,
+ 2.75,
+ 3.125,
+ 3.3125,
+ 2.84375,
+ 3.4375,
+ 3.09375,
+ 3.375,
+ 3.5625,
+ 3.71875,
+ 3.15625,
+ 3.375,
+ 3.3125,
+ 3.0625
+ ],
+ "test_dead_enemies_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_ep_length_mean": [
+ 61.125,
+ 48.6875,
+ 70.9375,
+ 68.15625,
+ 64.375,
+ 67.65625,
+ 63.84375,
+ 61.96875,
+ 65.78125,
+ 60.90625,
+ 63.875,
+ 60.0625,
+ 59.53125,
+ 59.34375,
+ 58.75,
+ 62.875,
+ 57.15625,
+ 65.1875,
+ 63.0625,
+ 57.71875,
+ 57.84375,
+ 66.0625,
+ 70.28125,
+ 64.65625,
+ 59.71875,
+ 69.6875,
+ 58.1875,
+ 58.03125,
+ 62.21875,
+ 65.21875,
+ 61.59375,
+ 61.5625,
+ 62.9375,
+ 57.40625,
+ 58.0625,
+ 59.0625,
+ 59.5,
+ 62.28125,
+ 56.46875,
+ 59.125,
+ 59.65625,
+ 61.90625,
+ 62.75,
+ 57.6875,
+ 57.5,
+ 61.4375,
+ 64.9375,
+ 60.03125,
+ 63.1875,
+ 57.46875,
+ 59.125,
+ 61.0,
+ 67.40625,
+ 58.25,
+ 63.5625,
+ 55.78125,
+ 61.21875,
+ 56.9375,
+ 57.4375,
+ 59.625,
+ 58.46875,
+ 59.09375,
+ 58.6875,
+ 60.3125,
+ 62.21875,
+ 57.875,
+ 60.1875,
+ 55.9375,
+ 59.15625,
+ 63.40625,
+ 58.96875,
+ 59.15625,
+ 59.4375,
+ 63.09375,
+ 61.03125,
+ 61.03125,
+ 62.53125,
+ 58.40625,
+ 62.84375,
+ 60.625,
+ 55.84375,
+ 57.625,
+ 58.65625,
+ 59.96875,
+ 56.78125,
+ 62.71875,
+ 58.59375,
+ 58.125,
+ 56.84375,
+ 62.375,
+ 59.78125,
+ 63.21875,
+ 61.75,
+ 60.5,
+ 57.65625,
+ 59.53125,
+ 58.5625,
+ 59.90625,
+ 59.34375,
+ 56.03125,
+ 57.9375,
+ 58.8125,
+ 59.1875,
+ 54.0,
+ 58.65625,
+ 55.53125,
+ 57.3125,
+ 57.875,
+ 60.65625,
+ 61.9375,
+ 57.4375,
+ 56.625,
+ 59.21875,
+ 60.1875,
+ 61.625,
+ 59.0,
+ 61.96875,
+ 58.9375,
+ 59.40625,
+ 57.125,
+ 63.1875,
+ 63.8125,
+ 62.96875,
+ 56.25,
+ 57.0,
+ 61.6875,
+ 56.4375,
+ 60.71875,
+ 57.5,
+ 59.75,
+ 58.53125,
+ 60.875,
+ 58.625,
+ 59.5625,
+ 59.0,
+ 61.9375,
+ 59.40625,
+ 61.9375,
+ 59.28125,
+ 59.78125,
+ 60.28125,
+ 61.0625,
+ 58.375,
+ 62.40625,
+ 58.28125,
+ 61.65625,
+ 59.53125,
+ 57.3125,
+ 58.9375,
+ 61.65625,
+ 62.03125,
+ 56.875,
+ 55.8125,
+ 58.71875,
+ 57.625,
+ 61.75,
+ 59.5625,
+ 60.3125,
+ 57.28125,
+ 57.90625,
+ 59.4375,
+ 59.9375,
+ 56.0625,
+ 58.125,
+ 58.21875,
+ 58.90625,
+ 68.40625,
+ 61.0625,
+ 59.90625,
+ 57.5,
+ 60.0,
+ 57.1875,
+ 67.03125,
+ 60.65625,
+ 63.5625,
+ 58.9375,
+ 59.4375,
+ 58.0625,
+ 58.625,
+ 60.71875,
+ 58.40625,
+ 61.9375,
+ 57.15625,
+ 56.28125,
+ 61.6875,
+ 63.125,
+ 60.6875,
+ 60.75,
+ 58.875,
+ 57.78125,
+ 56.78125,
+ 60.4375,
+ 62.15625,
+ 62.21875,
+ 59.40625,
+ 62.28125,
+ 60.5,
+ 57.15625,
+ 61.625,
+ 59.75,
+ 61.6875,
+ 58.6875,
+ 59.53125,
+ 61.5,
+ 60.65625,
+ 60.65625,
+ 58.53125,
+ 64.9375,
+ 58.34375,
+ 58.1875,
+ 57.875,
+ 62.46875,
+ 60.09375,
+ 60.875,
+ 59.28125,
+ 59.71875,
+ 60.4375,
+ 65.9375,
+ 58.4375,
+ 62.0625,
+ 64.375,
+ 60.8125,
+ 64.1875,
+ 61.1875,
+ 64.96875,
+ 65.21875,
+ 64.75,
+ 66.125,
+ 60.09375,
+ 61.125,
+ 62.375,
+ 62.625,
+ 62.75,
+ 66.59375,
+ 63.9375,
+ 61.90625,
+ 60.15625,
+ 59.78125,
+ 61.40625,
+ 60.09375,
+ 58.65625,
+ 61.46875,
+ 59.1875,
+ 61.1875,
+ 59.3125,
+ 60.875,
+ 59.4375,
+ 59.1875,
+ 65.15625,
+ 63.90625,
+ 60.75,
+ 59.3125,
+ 61.15625,
+ 61.78125,
+ 60.5,
+ 63.25,
+ 60.9375,
+ 57.53125,
+ 60.4375,
+ 63.53125,
+ 56.9375,
+ 60.78125,
+ 59.21875,
+ 58.875,
+ 60.65625,
+ 62.875,
+ 66.53125,
+ 61.53125,
+ 59.59375,
+ 61.65625,
+ 65.25,
+ 57.1875,
+ 62.1875,
+ 62.15625,
+ 61.28125,
+ 61.9375,
+ 63.1875,
+ 64.0,
+ 59.03125,
+ 64.375,
+ 61.59375,
+ 62.1875,
+ 62.0625,
+ 61.25,
+ 59.53125,
+ 59.625,
+ 59.03125,
+ 64.40625,
+ 60.65625,
+ 61.40625,
+ 59.5625,
+ 62.4375,
+ 61.6875,
+ 65.3125,
+ 63.6875,
+ 57.75,
+ 59.34375,
+ 71.0,
+ 58.28125,
+ 58.28125,
+ 63.21875,
+ 60.90625,
+ 62.90625,
+ 61.375,
+ 62.15625,
+ 62.21875,
+ 63.9375,
+ 56.65625,
+ 56.9375,
+ 63.6875,
+ 60.53125,
+ 59.1875,
+ 64.1875,
+ 60.09375,
+ 61.6875,
+ 59.8125,
+ 57.625,
+ 59.8125,
+ 61.3125,
+ 57.03125,
+ 62.0625,
+ 62.3125,
+ 56.65625,
+ 56.84375,
+ 62.15625,
+ 58.9375,
+ 61.875,
+ 65.875,
+ 58.71875,
+ 60.1875,
+ 60.25,
+ 63.6875,
+ 61.78125,
+ 60.53125,
+ 62.375,
+ 64.3125,
+ 59.28125,
+ 63.5625,
+ 58.125,
+ 59.9375,
+ 64.625,
+ 61.34375,
+ 63.25,
+ 59.90625,
+ 57.46875,
+ 59.625,
+ 59.40625,
+ 62.34375,
+ 63.3125,
+ 65.34375,
+ 59.59375,
+ 61.0625,
+ 63.90625,
+ 54.96875,
+ 64.75,
+ 60.3125,
+ 60.1875,
+ 61.34375,
+ 59.375,
+ 58.28125,
+ 59.03125,
+ 62.53125,
+ 61.8125,
+ 61.59375,
+ 59.3125,
+ 60.125,
+ 59.4375,
+ 57.21875,
+ 57.09375,
+ 60.5625,
+ 58.78125,
+ 60.90625,
+ 60.6875,
+ 64.1875,
+ 58.75,
+ 56.875,
+ 62.90625,
+ 60.40625,
+ 59.15625,
+ 65.96875,
+ 59.25,
+ 66.0,
+ 60.8125,
+ 64.28125,
+ 61.03125,
+ 59.875,
+ 65.875,
+ 61.84375,
+ 61.1875,
+ 61.75,
+ 56.28125,
+ 58.34375,
+ 60.125,
+ 59.78125,
+ 63.40625,
+ 62.8125,
+ 59.0625,
+ 58.4375,
+ 63.875,
+ 59.5,
+ 59.65625
+ ],
+ "test_ep_length_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7920792079207921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5533980582524272
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.415048543689345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 22.356435643564346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.300970873786408
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.32038834951457
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.902912621359228
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.18446601941748
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.98019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.262376237623755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.16504854368933
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 21.136138613861398
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.592233009708742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.49514563106798
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.126213592233025
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.61633663366337
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.792079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.492574257425748
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.194174757281544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584148
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 34.95145631067964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349536
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.38613861386139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.495049504950497
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.373762376237615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.45544554455445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.55445544554456
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.43564356435643
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.220297029702976
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.07766990291261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.237623762376245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.398058252427198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.178217821782177
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.68932038834953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.90291262135922
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.386138613861377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.456310679611658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.62376237623763
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.660891089108883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.417475728155356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.37623762376236
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.941747572815537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.999999999999996
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.88349514563107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.207920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.25742574257425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.728155339805838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.452970297029708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.88349514563107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.153465346534624
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.96782178217822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.613861386138606
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.76980198019801
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.762376237623755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.9207920792079
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.534653465346537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.157766990291265
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.742574257425737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.19417475728156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.978155339805838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 21.32178217821782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.059405940594058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.398058252427198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.212871287128714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.74757281553399
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.980198019801975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.980198019801968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.25742574257425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.861386138613863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.396039603960403
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.336633663366335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.01237623762376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960395
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.099009900990094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 21.713592233009717
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.00000000000001
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.563106796116525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.94059405940594
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.74257425742575
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.242718446601952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.696601941747588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631077
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.594059405940598
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.455445544554454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 21.603960396039607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.396039603960403
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.606435643564364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.03960396039604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227705
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.217821782178216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.88349514563107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.883495145631066
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.853960396039593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.34466019417477
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.561881188118807
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.43446601941749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.37623762376237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.17821782178217
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.396039603960403
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.410891089108908
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.448019801980195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.324257425742566
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.146039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.68932038834952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.077669902912625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.31067961165049
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.801980198019805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.180693069306926
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.398058252427195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.713592233009724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.415841584158414
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349536
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.077669902912625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.077669902912614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.217821782178216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262143
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.1188118811881
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.68932038834951
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.495145631067977
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.300970873786415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.46039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.37128712871287
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.58415841584158
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.689320388349525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.940594059405928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.613861386138613
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.19801980198021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.42718446601943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.980198019801975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.94059405940593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.33663366336634
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.17821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.813106796116504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.980198019801982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722771
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.792079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.834951456310673
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603964
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.06796116504856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.68316831683169
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.21039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.029126213592235
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.398058252427195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.37376237623762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.940594059405935
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262136
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.84158415841584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.76456310679613
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.34951456310682
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.15346534653464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.417475728155345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.70297029702969
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.17821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.24504950495049
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.88349514563107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.74271844660195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961165
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.43689320388349
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ }
+ ],
+ "test_return_max_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.049024319907718925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.10697034509276171
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.450456749255025
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.349864522253197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.292111500288382
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.603255971835049
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.204121557483422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.914724869028166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.982016906180913
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.400581563010672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.93924904474671
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.721836549553014
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.685350680092283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.359933733057773
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.9158949041142
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.932937013361533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.673212504806306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.521249128857063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.473331761270789
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.784239672450257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.218830355426324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.987647042920315
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.830211237143134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.736543845525333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.532026518792659
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.572729170671925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.404848661443816
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.466198151975394
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.129228047197927
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.076378959194468
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.324924300682497
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.766407526675001
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.892904240363361
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.500280868499473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.224757882341635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.747038114005578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.73248326804768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.199774103623957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.274615795924255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.679331532971261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.105884420359514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.212422948908971
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.282309640248009
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.558597880419114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.02607571133327
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.112583960155728
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.571629728203405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.494173105113912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.25236560367202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.335640109824094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.95476064596751
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.346259792848219
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.904184039459775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.007517362779971
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.182913732817457
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.5504264094492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.92156859800058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.541388301451509
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.958179882005195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.247856687974625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.642337246467367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.91019868066904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.017545269393448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.31967566447179
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.328847447851585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.360674955541674
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.144900359271366
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.415233286071327
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.534519038979143
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.990532328414883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.802827309429974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.60403699653946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.478025419350189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.835640860809384
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.067451246034803
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.506379620061523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.064291850908393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.076937692252237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.144131350331637
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.158367778525427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.64652023454773
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.7171241168413
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.828084446794197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.776659977890995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.508860124483325
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.183985388830145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.41832509252139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.568814284341059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.544051295299436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.94629704171874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.629524686388546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.318249543400947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.436893203883498
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.052480354224745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.114574071181393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.476769771940788
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.577619586898013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.444110172546385
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.687491739161784
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.525348006584641
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.074311496683652
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.585688172882826
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.55504496899933
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.77466160602711
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.97827474646737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.428509955061044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.470551613717198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.864933793136597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.542604897625688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.158086159040664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.747060643564359
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.250510669999041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.72086552556955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.334351419061813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.731537777564167
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.063830745938674
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.9299345741613
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.203514010381625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.96387760742094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.480559994713065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.1443513890224
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.761584699125255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.711624651542829
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.999028976016538
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.60232099514564
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.279730756752864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.21341950639239
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.72990288258195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.500066086705761
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.592800003604733
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.138522241180432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.90271661419783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.74355279126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.41157899163703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.132100565942519
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.634357276747096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.469523514851488
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.69683024127656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.796109745986737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.50635408656157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.11168052484861
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.610641912188793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.748342575459004
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.14331052340671
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.175255184802463
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.48298267326733
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.942271760549847
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.315934255743539
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.801841265740656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.55197343915217
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.918358135874271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.57127526314525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.432626105450353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.904239612371436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.97602329255984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.93939323392291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.2838649307892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.991475565942519
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.609136186676924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.305381410410462
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.141619304527545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.09495908632126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.898556906661543
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.201835558252432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.412402071517835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.84811247356532
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.208628220224938
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.37106108213977
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.250229050514278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.53772875012016
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.436100163414402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.61663402383928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.420856663943098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.486308036143422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.323293160626747
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.752675009612616
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.009629884408348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.88597489906758
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.129363975535906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.030396880707492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.53833704820725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.344675964865907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.158426355378259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.309679299240607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.06035218206287
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.133085107661252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.349766894165146
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.637089361241951
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.565573782803042
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.659807417331542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.492788288234166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.228525575555132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.618506981159285
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.244728834230514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.939333906084789
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.908771808612904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.136186676920122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.307830373449969
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.130294446313567
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.089737485581086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.484299901470735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.874666562530042
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.840842184946652
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.313959164423729
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.576284335047585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.02736064716909
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.916064626790352
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.924076888878211
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.61760579880804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.782311893203888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.831896448139961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.49803241853312
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.33314533668173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.499683084206483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.912118950062487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.853371623570126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.993132990483517
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.137328925550328
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.33415315894454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.948212054215134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.177066561328466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.351071355618574
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.367674889454968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.141624561424592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.914082776602905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.78609235316736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.408763547774686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.078064170191293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.016436064116123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.413096732913587
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.535713105594542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.224464998077483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.050752337066235
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.626839162981838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.80342284076709
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.507176415457081
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.326040264827455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.55572836561569
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.460740741853314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.135167589877923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.212689548687882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.4819771039604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.147432681678364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.087826978996446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.891813809718354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.839307921993658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.793872560799771
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.128836783860429
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.491064776987411
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.531280039411714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.247414357637222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.749474310295112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.400569547245986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.273472045323468
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.11293992718447
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.463279823127948
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.49974091007402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.463167175334043
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.087085005527257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.066845951888883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.12066606387581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.121937481976357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.121742976785548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.227248149572244
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.235466932615594
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.104590472700185
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.88689635802173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.702410062001348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.075644495578203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.06025455397482
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.60803298928194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.103626207584355
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.495351401038164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.144249255022594
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.749001189560708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.495889106507743
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.497686965298474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.70053034581371
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.01921245674325
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.998094750312415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.235786101364994
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.31860626141498
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.105670389551094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.128871329183893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.689711651687018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.278498389887536
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.048938707584353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.949358808757093
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.414311076131888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.912861674516968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.827103660001924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.69764731327502
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.705506374363168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.865897307267137
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.602372813130835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.105614065654143
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.204844756320297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.829555626982604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.183847207536292
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.763167025136982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.90245902624243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.87397415409017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.794753466548116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.490758374987989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.594622644910126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.074708767903495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.413518786648085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.776797408199561
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.208061226328946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.807364762568495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.349925352061907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.44643297005672
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.141014761366918
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.068742940738252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.185394988224555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.769488819330963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.621835347976546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.168162879698166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.008360719263676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.155933835191775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.067681047534368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.744640968951266
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.779461904018074
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.726639851485153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.508348703498996
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.086011847543979
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.71403906925887
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.404528741709125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.027662543256756
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.851464871911952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.490304028885902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.833061226328947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.790825813467272
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.555435481351534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.3206008783524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.307686935259063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.717175183841203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.82028621551476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.869939861097764
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.382699101220805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.010560355186005
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.203448674661157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.685930440738252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.588842311112183
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.377070466451988
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.969219365807943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.97222631091993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.18675201864847
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.238891425550324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.638133230798815
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.282013752042682
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.254763499711625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.946294037777566
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.462526584879367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.96364254902432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.208032688887826
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.388916508459104
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.061131704796697
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.55201699629915
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.122853684033455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.879953498990679
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.340623648226478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.109791045852162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.08064004974527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.62486031673556
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.639285993223112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.149093110160532
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.397190864414114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.371934478035184
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.612294830818037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.256507287561284
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.012537699461696
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.136459284581374
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.08135123281746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.765463538162072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.510495770450834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.180930380659426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.944931750456602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.063496557483422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.104825531096804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.930470026675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.948674661155438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.381375865135062
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.473910019946173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.105722958521582
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.049619851244838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.66005599346343
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.029691705517642
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.579342347159475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.960087384648661
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.896288931077574
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.830293845525334
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.638827892194563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.979056522157073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.470925604392967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.582452177256567
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.34579042704028
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.320226887676636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.60460849634721
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.481862203210618
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.557298675862734
+ }
+ ],
+ "test_return_mean_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.1067961165048534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.941747572815534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.436893203883494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.750000000000001
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.467821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.485148514851486
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.653465346534656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9381188118811887
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.342233009708737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.058252427184466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.396039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.814356435643565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.776699029126212
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.752475247524753
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.4727722772277225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.609223300970873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.395631067961164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.222772277227723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.796116504854368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0816831683168315
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9368932038834954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.371287128712873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.03640776699029
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.646039603960397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.604368932038834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.116504854368931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.871287128712873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.097087378640775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.924757281553396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.188118811881191
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.165048543689319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.304455445544557
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.336633663366338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.13861386138614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.143564356435645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.841584158415844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.446601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.86650485436893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.445544554455447
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.021844660194173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.424757281553396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8217821782178225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.524752475247526
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.555825242718446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.396039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.61407766990291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.297029702970299
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.982673267326734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.289603960396042
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6674757281553387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.8886138613861405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9029126213592225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.873786407766989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.754950495049506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.417475728155338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.893203883495144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5643564356435675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.344059405940595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.4108910891089135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.299504950495052
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.148058252427183
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.825242718446601
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.534653465346537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.113861386138614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.871287128712874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.039603960396041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.4752475247524774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.638349514563105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.281553398058251
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.36650485436893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.257281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.0849514563106775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.475728155339804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.403465346534655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.152912621359222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.216019417475728
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.973300970873786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.922330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.601941747572814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.771844660194175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.155339805825241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.633663366336635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.524271844660193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.45544554455446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.405940594059409
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.051980198019804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.412621359223299
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.621359223300969
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.577669902912619
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.86650485436893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.4752475247524774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.281553398058252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.720297029702972
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.728155339805825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.631067961165047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.079207920792082
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.673267326732675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.339805825242717
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.893203883495145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.688118811881189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.0849514563106775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.594059405940595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.727722772277228
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.24514563106796
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.830097087378639
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.225728155339804
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.311881188118814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.178217821782179
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.728155339805824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.0097087378640754
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.522277227722775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.002475247524753
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.336633663366339
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.405339805825242
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.3960396039604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.908415841584158
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.211165048543688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.319306930693072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.104368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.386138613861387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.5841584158415865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.980582524271842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.425742574257428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.653465346534658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.108910891089113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.772277227722777
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.592233009708735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.55940594059406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.910891089108913
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6796116504854357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.754950495049508
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.310679611650484
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.08009708737864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.069306930693072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.445544554455446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.613861386138614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.097087378640774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.213592233009708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.455445544554458
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.754950495049506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.581683168316834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7621359223300965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.405940594059409
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.017326732673269
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.308252427184465
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.6626213592233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.141089108910894
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.061881188118813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.287128712871288
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.529126213592232
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.4271844660194155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.871287128712874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.247524752475248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.900990099009903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.841584158415842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.405940594059407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.446601941747572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.155339805825242
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.009900990099011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.055825242718446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.376213592233007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.817961165048541
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5742574257425765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.876213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.584158415841588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.815533980582524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.004950495049507
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.572815533980581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.859223300970871
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.49271844660194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.118811881188119
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.75
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.980198019801982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.680693069306931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.465346534653468
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.6019417475728135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.45792079207921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.276699029126212
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.466019417475727
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.640776699029129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2281553398058245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.172330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.271844660194173
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.259900990099012
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.193069306930694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.720873786407766
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.368932038834949
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.048543689320386
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.932038834951454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9126213592233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.104368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.07766990291262
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.793689320388348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.223300970873784
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.223300970873785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.820388349514563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.490099009900991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.299504950495051
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.713592233009706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.4009900990099045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9480198019801978
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.8349514563106775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.326732673267328
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.696601941747571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.700495049504951
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.740291262135925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.252475247524755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.686893203883494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.3366336633663405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.081683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5436893203883475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.4781553398058245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.95145631067961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2621359223300965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.113861386138616
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.854368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.135922330097085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.257281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.249999999999999
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.1941747572815515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.70792079207921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.184466019417473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.448019801980199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.024271844660197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.349514563106794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.841584158415843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.653465346534654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0594059405940595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.786407766990289
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.43811881188119
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.512376237623764
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.294554455445546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.044554455445544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.306930693069307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.434466019417474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.952970297029708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.893564356435645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.641089108910892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.563106796116504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.970297029702973
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.28712871287129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.546116504854368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5024271844660175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.854368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.79611650485437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.064356435643565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.184466019417473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.36893203883495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.8886138613861387
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.563106796116509
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.43564356435644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.135922330097085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.207920792079213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.485436893203882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.507281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.792079207920795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.060679611650485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.07038834951456
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.381188118811881
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.982673267326733
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5742574257425757
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.982673267326735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.25242718446602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.652912621359222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.691747572815533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.376237623762378
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.257281553398056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.603960396039607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.584158415841585
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.665841584158418
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.108910891089112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.754950495049508
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.213592233009708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.759900990099013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.034653465346536
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.74757281553398
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.071782178217823
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.266990291262134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.106796116504852
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.720873786407765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.7351485148514865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.609223300970872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.30693069306931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.445544554455448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.9878640776699
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.059405940594063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.048543689320388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.512135922330096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.955445544554458
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.961165048543688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.082524271844659
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.388613861386142
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.78155339805825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.408415841584162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.890776699029125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.774271844660191
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.283980582524271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.67961165048544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.069306930693074
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.62135922330097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.851485148514854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.873786407766993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.706310679611648
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.472772277227724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.871287128712874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.415841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.851485148514853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.801980198019805
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.13613861386139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.022277227722773
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.932038834951455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.099009900990103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.900990099009902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5990099009901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.069306930693072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.799504950495053
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.93811881188119
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.148514851485149
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.259708737864077
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.492718446601941
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.841584158415844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.24752475247525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.707920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7896039603960405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.385922330097086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.956310679611653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.478155339805824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.718446601941746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.183168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.922330097087376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.893564356435647
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.247524752475249
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.16504854368932
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.844660194174761
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.805825242718445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.789603960396041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.922330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.417475728155338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.948019801980199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.881188118811882
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.519417475728153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.46782178217822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.28155339805825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.567961165048541
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.055825242718445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.309405940594061
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.543689320388347
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.604368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.712871287128714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.451456310679609
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.640776699029125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.207920792079213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.11650485436893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.558252427184464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.042079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.806930693069308
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.737864077669904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.422330097087377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.980582524271842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.099514563106794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.30693069306931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.417475728155338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.96782178217822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.846534653465348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.683168316831685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.087378640776699
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.83663366336634
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.669902912621357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.961165048543687
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.058252427184463
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.0752427184466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.146039603960398
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.544554455445546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.096534653465349
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.825242718446601
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.99029126213592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.53883495145631
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.257425742574259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.82673267326733
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.584158415841587
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.427184466019416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.3960396039604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.925742574257427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.955445544554454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.212871287128715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.9232673267326765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.997572815533978
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.099009900990101
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.007281553398057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.44902912621359
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.866504854368931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.339805825242716
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.174757281553396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.934466019417473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.383663366336636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.735148514851487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.9108910891089135
+ }
+ ],
+ "test_return_min_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.1898801075110269
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.35544494241512786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.065241476529621
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.493662910729761
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2144470342368034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3036809723677205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.747829653826123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.8327584368423215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.002312194411263
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.255531513684166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.578253785045838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.589005889144159
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2617854302457925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.924659398842587
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.051446396547661
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.248623359861616
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.877412938453834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.142265230917954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.042591850653418
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9010230707610996
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9432843521221566
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834876565897337
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.958889146664193
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.6064581640804985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.772218397994177
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.344765411393713
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.8457748116179475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.225010985540304
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.013247944364982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.666222530514429
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.542340477648326
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.62657235122551
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.9253144113415965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.678539261517772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.034829199895245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.389468607656657
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.638329635467785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.487172277262877
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9866904016900833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.293379978818724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7379954291844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.229603504091668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.60111414632039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.761939997968234
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.634579494237832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.403300008053834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.437202345929911
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6491007273435367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.216239954740649
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.669448493216671
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.4546770502823785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.612270618283583
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.1458342193145095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.063578307545281
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7072450753290465
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5535069231704135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.738603450120559
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.467959242194594
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.355048720685233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.697971591585251
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.123400951597896
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.084806015267755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.75090719860725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.870333225085276
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.225149031187609
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.548921160477075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.171031051521002
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.852491911651396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.665465994544291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.333511616151691
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5822101143191345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.1360765061731835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.995466681172571
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.309921182062058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.467038547937365
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.88648490244652
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.751565160102057
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.944607767342269
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.265061279328213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.506361095202223
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.130537751727418
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6068566915393445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.367293280888446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.495564167762124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.839630781458923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.531304963328086
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.743545402296668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.187310391681912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.663906607764791
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.704570754843409
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.198077434094466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.097559882556712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.306299706982586
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.635279417996206
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.040360056706845
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.396615157212982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.191863085642323
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.128284617248467
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.655926937975799
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.886332315663311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.512236905477517
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.430698576686728
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.176330708569383
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.289518045899739
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.862969106880747
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.465169079424628
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.681044514588261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.505009436195906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.718293152535779
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.377461791957638
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.116503224685622
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.467406508729975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.84843799498243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6631827488235778
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.986073120992123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.694383217701764
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.4829892242770075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.578987028996757
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.044889984038386
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.251558223671711
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.38764679633627
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.118571667431714
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9560200568097064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.424819306191702
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.060488789368524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.19781625825739
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.558007120004227
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.895047042231245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.144088362565814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.176033338683641
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6238034319168495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.182783921991301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.530869921658412
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6584593539943238
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.905232817156662
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.240009211756495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.109088807716052
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.705021690936598
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.332930648378278
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.368963537596853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.558748259788059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.491501861556313
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.595603906555603
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6981792199454375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.478115998995864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.187761312412553
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.912896974032368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.259963244160931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.680044500839257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.660915867388388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5631318129703833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.84920561855788
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.661822762551967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.750319126466291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.011608898684636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.6481769059991125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.707340394459718
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.896950813026423
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.233398777408491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5006566475528755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.294409801601843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.934430324505455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.119817203127225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.441999005825284
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.797153244121279
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.798276745272207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.222535373165184
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.233480981890176
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.903565798026274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.144901408635618
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.908200222293249
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0957273206603
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.110653969672149
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.793510609859135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.28959999422197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.707724050450835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.299745342713634
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9357434797596826
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.798104782781828
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.089676946371956
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.849046157723561
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.697324458821124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.212057763173226
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.072981750802792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.467415574109942
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.805579314312257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.937071088745303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.160374765338391
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.593892433678953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.805832113434191
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.467941690214024
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.86191638681655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7685599265946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.941673168918214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.920528952399553
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.484424729506524
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.353112456750073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.775917235163736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.586332365722475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7626535421409253
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8671147345903254
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.64290532219202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6669983923950924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.878865284914021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.323598171903001
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1944234242962155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.508545220348321
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.93113051786581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8337790689258986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.086588706765814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5871162657316327
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.255876486699404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.451310438772333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.088635475776291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.35244223664382
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.986146535108127
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.735943600344336
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.625240200588813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.8435068132213965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.545624450925205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.458774057126219
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.27486478332212
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.492203049574821
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.035717291223452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.738832732412835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.343138121332525
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.983329870600319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.418616754519035
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.318808724237615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.78759005503654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.952424004105306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.848275612584029
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3208157585554146
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.588089109517753
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.946297988888577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.396602461850462
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.909797506467687
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.679663052137839
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.504170642622092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.477613482428466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.998673618134426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.996830031574134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5474898783292925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.546798565772442
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.725947940706102
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.176987181140808
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.462252321973773
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.899649372760319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.775469244104113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.882656213282715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.580168572340918
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.157511076096821
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.173496491824898
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.170776036067348
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.827942451237982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.804131880508487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.163044577357492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8608215806629085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.772144135081793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.773245772904985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.539234816540899
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.111256686359893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.333292541395871
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.604229865807848
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.800328486741984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.653481690359492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.96230911029894
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.353151291735772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.786910883686186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.063123026422441
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.611017618674563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.46838733376144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.733758200768987
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.151991486045364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.906575438534803
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9639455631330978
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.389029945978463
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.784442783991798
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.156927295682182
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.796721789608031
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019596162358732
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.846635411826216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.886521066321786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.505490859298652
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.246548307477017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.539950399262035
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.211952489220965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8902404206762786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9048237772583785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.801454167619388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6088816117675355
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.713672939357621
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6361691623268872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.416782979703486
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1410739951141755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.537716743450981
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7512313653306886
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.012875429935916
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.21804199122843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1454288225332165
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9495439748255006
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.963644080058293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.112902302662033
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9035192965101855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.428926100370422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8125983873303753
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6545077013767604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.638365223679001
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.007490382251782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.159364524729498
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.579641394132399
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.260225332552048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.1661607991640786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.727453809686211
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.08892301905931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.386639024125914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.307336135760809
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.117305889136928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.841435140923751
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.954992401055114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.343386402526595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4705072088872875
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.568229681929247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.592153033401368
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6300485478603415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.108019107462725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.93189345900955
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.320493313927562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5399628040662146
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8478762116289404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.191801771896762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4932600998028045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7029675817045336
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.799361146337354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.963317035227865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.555528893123447
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.110272474518015
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.1188211356500375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5181436885579345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.421490951400799
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.414551417660708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.014614823434898
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.254199477534369
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.985877398160445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.181935985348688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.184795141189345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.792909444161161
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9345854568250775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.84199679048769
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.957332905431141
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5643779916609155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.261995181437866
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.327293778924689
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.7541745988551725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4098659604222545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.253873343699273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.575838137376606
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.786161795330965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.272637261504266
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.157502897815313
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.75809068373294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.726321921221197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0548918717029405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.810073957998
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.979438592553333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.652230756363726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.107551758979255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.694339241509312
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.113305322348268
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0618916013647475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.729386101766144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.504319389421429
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.803783502249752
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.430099909660059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.104216050766853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.640388051871825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.265330405991333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.756263625587094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.310724111008514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.6465018547166625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.460060603071488
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.059208944368926
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.592519163223589
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.361023520274448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.369656570348583
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.893853383555625
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.882020389733159
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.740892505229294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.04563644730044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.267497191359626
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.326418187658426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.293204307085293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.639161934876153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.589128376400236
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.878475225502376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.489214399330092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.873804313422151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.91084993205328
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.56657174336851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4118846677222345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9832547362940165
+ }
+ ],
+ "test_return_std_T": [
+ 194,
+ 10374,
+ 20428,
+ 30500,
+ 40676,
+ 50873,
+ 61038,
+ 71210,
+ 81230,
+ 91272,
+ 101424,
+ 111431,
+ 121432,
+ 131673,
+ 141819,
+ 151830,
+ 161986,
+ 172109,
+ 182334,
+ 192453,
+ 202496,
+ 212599,
+ 222635,
+ 232764,
+ 242814,
+ 252819,
+ 262843,
+ 272899,
+ 283084,
+ 293094,
+ 303310,
+ 313472,
+ 323617,
+ 333784,
+ 343903,
+ 353921,
+ 364085,
+ 374287,
+ 384290,
+ 394296,
+ 404471,
+ 414560,
+ 424686,
+ 434696,
+ 444926,
+ 455104,
+ 465150,
+ 475252,
+ 485342,
+ 495399,
+ 505436,
+ 515696,
+ 525789,
+ 535930,
+ 545945,
+ 555945,
+ 566049,
+ 576122,
+ 586317,
+ 596519,
+ 606748,
+ 616889,
+ 627158,
+ 637260,
+ 647263,
+ 657273,
+ 667473,
+ 677721,
+ 687849,
+ 698027,
+ 708100,
+ 718102,
+ 728312,
+ 738459,
+ 748659,
+ 758695,
+ 768913,
+ 778922,
+ 789065,
+ 799266,
+ 809346,
+ 819530,
+ 829586,
+ 839697,
+ 849860,
+ 860025,
+ 870220,
+ 880415,
+ 890511,
+ 900525,
+ 910705,
+ 920715,
+ 930760,
+ 940766,
+ 950940,
+ 961103,
+ 971321,
+ 981450,
+ 991606,
+ 1001651,
+ 1011710,
+ 1021718,
+ 1031978,
+ 1042077,
+ 1052110,
+ 1062289,
+ 1072420,
+ 1082524,
+ 1092746,
+ 1102920,
+ 1112940,
+ 1122984,
+ 1133088,
+ 1143284,
+ 1153391,
+ 1163395,
+ 1173498,
+ 1183612,
+ 1193776,
+ 1203953,
+ 1213973,
+ 1224024,
+ 1234153,
+ 1244295,
+ 1254356,
+ 1264497,
+ 1274565,
+ 1284604,
+ 1294621,
+ 1304713,
+ 1314851,
+ 1325035,
+ 1335188,
+ 1345253,
+ 1355510,
+ 1365716,
+ 1375874,
+ 1386066,
+ 1396208,
+ 1406386,
+ 1416515,
+ 1426604,
+ 1436650,
+ 1446726,
+ 1456821,
+ 1466930,
+ 1477099,
+ 1487150,
+ 1497352,
+ 1507391,
+ 1517564,
+ 1527624,
+ 1537679,
+ 1547897,
+ 1558095,
+ 1568161,
+ 1578315,
+ 1588334,
+ 1598393,
+ 1608483,
+ 1618592,
+ 1628657,
+ 1638781,
+ 1649029,
+ 1659119,
+ 1669119,
+ 1679375,
+ 1689413,
+ 1699570,
+ 1709768,
+ 1719828,
+ 1729948,
+ 1740177,
+ 1750184,
+ 1760371,
+ 1770392,
+ 1780586,
+ 1790740,
+ 1800756,
+ 1810941,
+ 1820956,
+ 1831037,
+ 1841313,
+ 1851534,
+ 1861656,
+ 1871864,
+ 1882025,
+ 1892117,
+ 1902270,
+ 1912333,
+ 1922467,
+ 1932708,
+ 1942801,
+ 1952927,
+ 1963074,
+ 1973090,
+ 1983234,
+ 1993269,
+ 2003474,
+ 2013561,
+ 2023661,
+ 2033881,
+ 2043963,
+ 2054119,
+ 2064154,
+ 2074351,
+ 2084444,
+ 2094600,
+ 2104748,
+ 2114931,
+ 2124947,
+ 2135163,
+ 2145354,
+ 2155508,
+ 2165692,
+ 2175715,
+ 2185926,
+ 2196148,
+ 2206249,
+ 2216432,
+ 2226606,
+ 2236624,
+ 2246771,
+ 2256780,
+ 2266859,
+ 2276957,
+ 2287104,
+ 2297302,
+ 2307303,
+ 2317390,
+ 2327462,
+ 2337611,
+ 2347768,
+ 2357940,
+ 2368145,
+ 2378307,
+ 2388379,
+ 2398570,
+ 2408757,
+ 2418798,
+ 2428888,
+ 2438913,
+ 2449116,
+ 2459260,
+ 2469434,
+ 2479626,
+ 2489833,
+ 2499991,
+ 2510020,
+ 2520059,
+ 2530270,
+ 2540433,
+ 2550496,
+ 2560624,
+ 2570804,
+ 2580874,
+ 2590993,
+ 2601088,
+ 2611125,
+ 2621242,
+ 2631351,
+ 2641522,
+ 2651675,
+ 2661736,
+ 2671810,
+ 2681946,
+ 2692137,
+ 2702320,
+ 2712415,
+ 2722550,
+ 2732570,
+ 2742690,
+ 2752870,
+ 2762941,
+ 2773018,
+ 2783159,
+ 2793224,
+ 2803361,
+ 2813487,
+ 2823679,
+ 2833919,
+ 2844122,
+ 2854371,
+ 2864515,
+ 2874568,
+ 2884568,
+ 2894772,
+ 2904828,
+ 2914915,
+ 2925139,
+ 2935385,
+ 2945575,
+ 2955784,
+ 2965878,
+ 2976079,
+ 2986111,
+ 2996149,
+ 3006390,
+ 3016621,
+ 3026662,
+ 3036882,
+ 3046930,
+ 3057109,
+ 3067327,
+ 3077504,
+ 3087571,
+ 3097605,
+ 3107719,
+ 3117777,
+ 3128009,
+ 3138168,
+ 3148229,
+ 3158481,
+ 3168634,
+ 3178702,
+ 3188844,
+ 3199082,
+ 3209229,
+ 3219299,
+ 3229532,
+ 3239753,
+ 3249900,
+ 3259919,
+ 3270054,
+ 3280228,
+ 3290271,
+ 3300374,
+ 3310465,
+ 3320623,
+ 3330643,
+ 3340717,
+ 3350810,
+ 3360905,
+ 3371083,
+ 3381150,
+ 3391367,
+ 3401585,
+ 3411654,
+ 3421858,
+ 3431907,
+ 3442063,
+ 3452100,
+ 3462350,
+ 3472490,
+ 3482659,
+ 3492889,
+ 3503095,
+ 3513118,
+ 3523288,
+ 3533471,
+ 3543630,
+ 3553721,
+ 3563794,
+ 3573998,
+ 3584105,
+ 3594290,
+ 3604367,
+ 3614553,
+ 3624782,
+ 3634831,
+ 3644898,
+ 3654942,
+ 3664952,
+ 3675136,
+ 3685260,
+ 3695334,
+ 3705582,
+ 3715735,
+ 3725762,
+ 3735774,
+ 3745999,
+ 3756145,
+ 3766225,
+ 3776477,
+ 3786555,
+ 3796691,
+ 3806809,
+ 3816898,
+ 3826969,
+ 3837146,
+ 3847388,
+ 3857506,
+ 3867569,
+ 3877601,
+ 3887789,
+ 3897865,
+ 3907956,
+ 3918081,
+ 3928149,
+ 3938292,
+ 3948357,
+ 3958542,
+ 3968658,
+ 3978747,
+ 3988818,
+ 3999000,
+ 4009013,
+ 4019014,
+ 4029173,
+ 4039250,
+ 4049428
+ ],
+ "worker_loss": [
+ 915.6021728515625,
+ 283.37091064453125,
+ 304.6099853515625,
+ 444.35406494140625,
+ 498.6358337402344,
+ 592.74462890625,
+ 513.7432861328125,
+ 669.7061767578125,
+ 888.0849609375,
+ 614.3201904296875,
+ 850.7722778320312,
+ 1204.80224609375,
+ 937.0169677734375,
+ 795.962890625,
+ 898.9736328125,
+ 963.631103515625,
+ 1092.1907958984375,
+ 962.9155883789062,
+ 907.364013671875,
+ 1029.1292724609375,
+ 908.1600952148438,
+ 1133.927978515625,
+ 893.3575439453125,
+ 992.8162231445312,
+ 791.2206420898438,
+ 742.6759033203125,
+ 760.3108520507812,
+ 1146.9439697265625,
+ 802.66455078125,
+ 934.5284423828125,
+ 1222.7408447265625,
+ 1067.08935546875,
+ 769.2294921875,
+ 1101.49365234375,
+ 984.8493041992188,
+ 900.95947265625,
+ 1101.5245361328125,
+ 1053.9210205078125,
+ 791.9821166992188,
+ 976.6781005859375,
+ 845.4638671875,
+ 1216.699462890625,
+ 930.520263671875,
+ 1099.4354248046875,
+ 1075.08544921875,
+ 1163.4761962890625,
+ 963.4404296875,
+ 1082.733154296875,
+ 2221.484375,
+ 1172.889892578125,
+ 1441.527099609375,
+ 1163.584228515625,
+ 1241.7305908203125,
+ 1114.4111328125,
+ 932.5099487304688,
+ 1531.474853515625,
+ 1037.705322265625,
+ 1837.909912109375,
+ 1464.36767578125,
+ 2087.916015625,
+ 1014.6162719726562,
+ 1082.320556640625,
+ 1224.0848388671875,
+ 1531.73876953125,
+ 1429.97021484375,
+ 1498.840087890625,
+ 1914.2325439453125,
+ 2369.5439453125,
+ 1882.5543212890625,
+ 1573.5384521484375,
+ 1047.5501708984375,
+ 1890.388671875,
+ 1171.416015625,
+ 1032.4580078125,
+ 1031.8173828125,
+ 1493.7803955078125,
+ 1881.7947998046875,
+ 1149.7598876953125,
+ 1154.2288818359375,
+ 1406.813232421875,
+ 1520.0498046875,
+ 2146.807861328125,
+ 1095.5450439453125,
+ 1134.7174072265625,
+ 1049.7637939453125,
+ 1583.677734375,
+ 996.412109375,
+ 1673.39892578125,
+ 954.998779296875,
+ 918.9017333984375,
+ 1242.617431640625,
+ 1052.5899658203125,
+ 1124.4771728515625,
+ 1135.7677001953125,
+ 1439.0098876953125,
+ 1003.3856201171875,
+ 1005.507568359375,
+ 1017.7389526367188,
+ 1179.816650390625,
+ 1639.91162109375,
+ 1324.5245361328125,
+ 844.5614013671875,
+ 1721.58056640625,
+ 947.8971557617188,
+ 1513.402099609375,
+ 931.48095703125,
+ 949.9196166992188,
+ 1015.7347412109375,
+ 1857.423583984375,
+ 1259.5244140625,
+ 1672.64892578125,
+ 941.976806640625,
+ 1392.915283203125,
+ 1028.0888671875,
+ 1509.995849609375,
+ 1177.5574951171875,
+ 1333.437744140625,
+ 1331.331787109375,
+ 1601.102294921875,
+ 2010.9207763671875,
+ 1238.7344970703125,
+ 1102.92333984375,
+ 1848.84521484375,
+ 1335.6285400390625,
+ 1122.5523681640625,
+ 1415.6373291015625,
+ 1149.6064453125,
+ 1435.184814453125,
+ 1090.35888671875,
+ 1371.8726806640625,
+ 1430.010498046875,
+ 1225.0670166015625,
+ 976.5042724609375,
+ 1586.0716552734375,
+ 1253.61865234375,
+ 1088.587158203125,
+ 1330.263916015625,
+ 1590.0146484375,
+ 1123.66748046875,
+ 1044.860107421875,
+ 1688.32177734375,
+ 930.7406616210938,
+ 1011.0568237304688,
+ 1685.9569091796875,
+ 1440.8123779296875,
+ 1643.699951171875,
+ 1610.55126953125,
+ 1068.3878173828125,
+ 1126.630126953125,
+ 1214.0859375,
+ 1733.90869140625,
+ 1719.5421142578125,
+ 1708.352783203125,
+ 1184.8193359375,
+ 1125.976806640625,
+ 1252.324951171875,
+ 1546.213623046875,
+ 1808.35791015625,
+ 1313.126220703125,
+ 1449.492919921875,
+ 1039.614990234375,
+ 1510.51220703125,
+ 1429.06103515625,
+ 1517.162841796875,
+ 1290.8011474609375,
+ 1979.9912109375,
+ 1272.061279296875,
+ 1069.784912109375,
+ 1420.0032958984375,
+ 1460.7171630859375,
+ 1248.832275390625,
+ 1451.794921875,
+ 1148.699462890625,
+ 1267.498779296875,
+ 1145.60205078125,
+ 1504.9560546875,
+ 1198.69775390625,
+ 1318.455322265625,
+ 1425.14990234375,
+ 1212.656982421875,
+ 1211.61328125,
+ 1417.36376953125,
+ 1324.47265625,
+ 1579.7130126953125,
+ 1939.5166015625,
+ 1382.9884033203125,
+ 2638.923828125,
+ 1797.96240234375,
+ 2067.80029296875,
+ 1608.042724609375,
+ 1452.492919921875,
+ 1395.331787109375,
+ 1519.00732421875,
+ 1754.317626953125,
+ 1337.275634765625,
+ 1743.37646484375,
+ 1787.5623779296875,
+ 2186.3076171875,
+ 1983.782470703125,
+ 2566.02392578125,
+ 1710.7647705078125,
+ 1983.712158203125,
+ 1957.517333984375,
+ 2012.533447265625,
+ 1922.3011474609375,
+ 2354.553466796875,
+ 1273.660888671875,
+ 1899.775634765625,
+ 1319.31005859375,
+ 2382.056640625,
+ 1509.6129150390625,
+ 1472.543701171875,
+ 1608.03564453125,
+ 1540.736328125,
+ 1592.435302734375,
+ 1663.64501953125,
+ 1299.611572265625,
+ 1836.122802734375,
+ 1831.855712890625,
+ 2041.537841796875,
+ 1354.46728515625,
+ 1568.1011962890625,
+ 1835.2484130859375,
+ 1305.0888671875,
+ 1363.780029296875,
+ 1551.767578125,
+ 1618.05224609375,
+ 1792.7960205078125,
+ 1894.34033203125,
+ 2220.0400390625,
+ 1318.0400390625,
+ 1265.9150390625,
+ 1621.037841796875,
+ 1401.1607666015625,
+ 1694.367919921875,
+ 1529.4404296875,
+ 1661.196533203125,
+ 1878.9915771484375,
+ 1257.94140625,
+ 1347.1094970703125,
+ 1609.9146728515625,
+ 1546.076416015625,
+ 1379.3515625,
+ 1473.0328369140625,
+ 1304.16943359375,
+ 1802.7158203125,
+ 1426.19140625,
+ 1393.7579345703125,
+ 1710.919189453125,
+ 1413.833251953125,
+ 1241.219970703125,
+ 1387.2493896484375,
+ 1733.53076171875,
+ 1754.2796630859375,
+ 1999.852783203125,
+ 1614.37841796875,
+ 1174.029296875,
+ 1682.891845703125,
+ 1912.3267822265625,
+ 1290.185302734375,
+ 1151.427001953125,
+ 1454.170654296875,
+ 1230.0458984375,
+ 1627.8544921875,
+ 1383.076171875,
+ 1746.76123046875,
+ 1383.690673828125,
+ 1228.348388671875,
+ 1234.753662109375,
+ 1502.2911376953125,
+ 1256.517578125,
+ 1301.7457275390625,
+ 1146.398681640625,
+ 1764.9189453125,
+ 1162.022705078125,
+ 1396.6109619140625,
+ 1424.345947265625,
+ 1353.28662109375,
+ 1223.26318359375,
+ 1196.3856201171875,
+ 1310.476318359375,
+ 1163.624755859375,
+ 1096.2122802734375,
+ 1299.426025390625,
+ 1107.069091796875,
+ 974.140380859375,
+ 1065.568359375,
+ 992.3743286132812,
+ 1441.25537109375,
+ 1505.868408203125,
+ 1407.3416748046875,
+ 1151.05810546875,
+ 1174.8746337890625,
+ 1065.8843994140625,
+ 1309.898681640625,
+ 1042.4820556640625,
+ 984.3803100585938,
+ 1224.7220458984375,
+ 1311.293701171875,
+ 1115.14892578125,
+ 1279.68359375,
+ 1396.8297119140625,
+ 1109.80126953125,
+ 1249.17041015625,
+ 1216.4024658203125,
+ 1370.32861328125,
+ 1162.5665283203125,
+ 991.98828125,
+ 1714.2958984375,
+ 937.415771484375,
+ 1087.4510498046875,
+ 1127.757080078125,
+ 1048.7076416015625,
+ 1264.9697265625,
+ 934.5494995117188,
+ 1139.691162109375,
+ 1085.826904296875,
+ 1083.858154296875,
+ 1277.023681640625,
+ 1079.601318359375,
+ 1773.568115234375,
+ 1358.7686767578125,
+ 1041.9959716796875,
+ 1014.6148681640625,
+ 1315.102783203125,
+ 1222.979736328125,
+ 1971.904052734375,
+ 1087.0352783203125,
+ 1248.97314453125,
+ 1115.0921630859375,
+ 1251.677978515625,
+ 1437.54345703125,
+ 1162.0830078125,
+ 1365.8909912109375,
+ 1204.659912109375,
+ 1251.1173095703125,
+ 1488.021240234375,
+ 1227.292724609375,
+ 1154.8095703125,
+ 1178.43505859375,
+ 1074.482177734375,
+ 908.6107177734375,
+ 1066.27099609375,
+ 1225.428466796875,
+ 1357.348876953125,
+ 1221.044921875,
+ 1115.20947265625,
+ 1130.2996826171875,
+ 1391.6871337890625,
+ 1418.0023193359375,
+ 1102.37646484375,
+ 1359.5760498046875,
+ 1126.7431640625,
+ 1417.1221923828125,
+ 1049.874267578125,
+ 997.9721069335938,
+ 920.1085205078125,
+ 1205.89990234375,
+ 875.00732421875,
+ 1250.2491455078125,
+ 958.716064453125,
+ 913.555419921875,
+ 1427.3426513671875,
+ 927.552490234375,
+ 964.563232421875,
+ 1214.22509765625,
+ 1116.7064208984375,
+ 920.4213256835938,
+ 1079.59375,
+ 1146.490966796875,
+ 978.514892578125,
+ 934.3594970703125,
+ 875.7136840820312,
+ 1151.192138671875,
+ 909.132568359375,
+ 1006.4833374023438,
+ 1038.76318359375,
+ 1337.720703125,
+ 1080.9166259765625,
+ 1163.696533203125,
+ 1492.567626953125,
+ 1030.7725830078125,
+ 1119.9034423828125,
+ 934.19970703125,
+ 1104.844482421875,
+ 930.3489379882812,
+ 1461.073974609375,
+ 982.730712890625,
+ 963.1357421875,
+ 960.628662109375,
+ 1136.035888671875,
+ 997.155029296875,
+ 1090.32080078125,
+ 1104.7919921875,
+ 1151.32177734375,
+ 1135.925048828125,
+ 982.5270385742188,
+ 1087.707763671875,
+ 1041.282470703125,
+ 1056.9083251953125
+ ],
+ "worker_loss_T": [
+ 7092,
+ 17295,
+ 27360,
+ 37425,
+ 47446,
+ 57702,
+ 67766,
+ 77887,
+ 88117,
+ 98180,
+ 108201,
+ 118363,
+ 128491,
+ 138566,
+ 148760,
+ 158815,
+ 169025,
+ 179049,
+ 189282,
+ 199401,
+ 209460,
+ 219584,
+ 229726,
+ 239732,
+ 249735,
+ 259775,
+ 269808,
+ 279861,
+ 289940,
+ 299973,
+ 310048,
+ 320136,
+ 330150,
+ 340224,
+ 350309,
+ 360399,
+ 370496,
+ 380586,
+ 390819,
+ 400869,
+ 410935,
+ 420950,
+ 431080,
+ 441210,
+ 451272,
+ 461380,
+ 471442,
+ 481588,
+ 491642,
+ 501803,
+ 511970,
+ 522049,
+ 532228,
+ 542420,
+ 552444,
+ 562617,
+ 572728,
+ 583006,
+ 593033,
+ 603151,
+ 613362,
+ 623380,
+ 633578,
+ 643643,
+ 653716,
+ 663826,
+ 673980,
+ 684046,
+ 694187,
+ 704361,
+ 714366,
+ 724371,
+ 734482,
+ 744535,
+ 754765,
+ 764823,
+ 774937,
+ 785022,
+ 795248,
+ 805473,
+ 815528,
+ 825625,
+ 835868,
+ 845907,
+ 856040,
+ 866047,
+ 876264,
+ 886567,
+ 896651,
+ 906842,
+ 916946,
+ 927044,
+ 937159,
+ 947297,
+ 957483,
+ 967619,
+ 977685,
+ 987931,
+ 997992,
+ 1008057,
+ 1018161,
+ 1028173,
+ 1038181,
+ 1048423,
+ 1058516,
+ 1068664,
+ 1078745,
+ 1088915,
+ 1098982,
+ 1109051,
+ 1119248,
+ 1129476,
+ 1139634,
+ 1149642,
+ 1159787,
+ 1169921,
+ 1179950,
+ 1190126,
+ 1200324,
+ 1210423,
+ 1220527,
+ 1230722,
+ 1240952,
+ 1250992,
+ 1261097,
+ 1271343,
+ 1281413,
+ 1291499,
+ 1301710,
+ 1311847,
+ 1321892,
+ 1331977,
+ 1341990,
+ 1352171,
+ 1362304,
+ 1372388,
+ 1382502,
+ 1392710,
+ 1402796,
+ 1412887,
+ 1422977,
+ 1433185,
+ 1443192,
+ 1453280,
+ 1463367,
+ 1473426,
+ 1483538,
+ 1493651,
+ 1503789,
+ 1513807,
+ 1524054,
+ 1534270,
+ 1544475,
+ 1554621,
+ 1564794,
+ 1574902,
+ 1584946,
+ 1594969,
+ 1605030,
+ 1615240,
+ 1625376,
+ 1635501,
+ 1645593,
+ 1655647,
+ 1665666,
+ 1675690,
+ 1685894,
+ 1695985,
+ 1706141,
+ 1716287,
+ 1726355,
+ 1736463,
+ 1746631,
+ 1756661,
+ 1766740,
+ 1776977,
+ 1787103,
+ 1797200,
+ 1807240,
+ 1817361,
+ 1827400,
+ 1837414,
+ 1847427,
+ 1857487,
+ 1867647,
+ 1877732,
+ 1887967,
+ 1898042,
+ 1908247,
+ 1918306,
+ 1928494,
+ 1938721,
+ 1948923,
+ 1959067,
+ 1969319,
+ 1979533,
+ 1989846,
+ 1999888,
+ 2009996,
+ 2020061,
+ 2030264,
+ 2040327,
+ 2050460,
+ 2060522,
+ 2070580,
+ 2080743,
+ 2090936,
+ 2101068,
+ 2111206,
+ 2121377,
+ 2131404,
+ 2141639,
+ 2151758,
+ 2161825,
+ 2171926,
+ 2181983,
+ 2192015,
+ 2202021,
+ 2212255,
+ 2222394,
+ 2232596,
+ 2242789,
+ 2252936,
+ 2262978,
+ 2273109,
+ 2283149,
+ 2293150,
+ 2303299,
+ 2313529,
+ 2323715,
+ 2333847,
+ 2343908,
+ 2354027,
+ 2364236,
+ 2374408,
+ 2384421,
+ 2394449,
+ 2404570,
+ 2414720,
+ 2424739,
+ 2434910,
+ 2444954,
+ 2455136,
+ 2465287,
+ 2475416,
+ 2485480,
+ 2495611,
+ 2505778,
+ 2516009,
+ 2526028,
+ 2536301,
+ 2546337,
+ 2556388,
+ 2566565,
+ 2576644,
+ 2586843,
+ 2596917,
+ 2607004,
+ 2617084,
+ 2627196,
+ 2637376,
+ 2647508,
+ 2657647,
+ 2667719,
+ 2677916,
+ 2688062,
+ 2698165,
+ 2708266,
+ 2718409,
+ 2728434,
+ 2738546,
+ 2748676,
+ 2758916,
+ 2768940,
+ 2779034,
+ 2789172,
+ 2799238,
+ 2809325,
+ 2819376,
+ 2829617,
+ 2839810,
+ 2850035,
+ 2860186,
+ 2870357,
+ 2880499,
+ 2890628,
+ 2900645,
+ 2910940,
+ 2921130,
+ 2931376,
+ 2941396,
+ 2951566,
+ 2961627,
+ 2971677,
+ 2981706,
+ 2991857,
+ 3001949,
+ 3012036,
+ 3022219,
+ 3032228,
+ 3042327,
+ 3052539,
+ 3062795,
+ 3072890,
+ 3082958,
+ 3093114,
+ 3103171,
+ 3113209,
+ 3123383,
+ 3133426,
+ 3143440,
+ 3153512,
+ 3163568,
+ 3173608,
+ 3183697,
+ 3193908,
+ 3203975,
+ 3214077,
+ 3224153,
+ 3234356,
+ 3244419,
+ 3254536,
+ 3264737,
+ 3274833,
+ 3284908,
+ 3295136,
+ 3305228,
+ 3315439,
+ 3325546,
+ 3335646,
+ 3345788,
+ 3355992,
+ 3366175,
+ 3376417,
+ 3386451,
+ 3396577,
+ 3406704,
+ 3416900,
+ 3427128,
+ 3437217,
+ 3447329,
+ 3457428,
+ 3467437,
+ 3477621,
+ 3487827,
+ 3497962,
+ 3508158,
+ 3518325,
+ 3528428,
+ 3538602,
+ 3548805,
+ 3558923,
+ 3569065,
+ 3579228,
+ 3589313,
+ 3599339,
+ 3609365,
+ 3619431,
+ 3629497,
+ 3639722,
+ 3649807,
+ 3660057,
+ 3670255,
+ 3680439,
+ 3690694,
+ 3700742,
+ 3710792,
+ 3720932,
+ 3731140,
+ 3741222,
+ 3751315,
+ 3761564,
+ 3771760,
+ 3781886,
+ 3792129,
+ 3802264,
+ 3812528,
+ 3822619,
+ 3832758,
+ 3842873,
+ 3852997,
+ 3863097,
+ 3873229,
+ 3883380,
+ 3893402,
+ 3903403,
+ 3913426,
+ 3923558,
+ 3933711,
+ 3943715,
+ 3953867,
+ 3963931,
+ 3974121,
+ 3984302,
+ 3994504,
+ 4004625,
+ 4014639,
+ 4024664,
+ 4034759,
+ 4044842
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/5/metrics.json b/results/sacred/10gen_protoss/feudal/5/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/5/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/feudal/5/run.json b/results/sacred/10gen_protoss/feudal/5/run.json
new file mode 100644
index 0000000..83bd40b
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/5/run.json
@@ -0,0 +1,124 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 318, in run_sequential\n sys.stdout.flush()\n",
+ "NameError: name 'sys' is not defined\n"
+ ],
+ "heartbeat": "2025-01-06T04:05:20.393530",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T20:17:39.546682",
+ "status": "FAILED",
+ "stop_time": "2025-01-06T04:05:21.070681"
+}
\ No newline at end of file
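The fail_trace recorded in this run.json ends in NameError: name 'sys' is not defined, raised at src/run/run.py line 318 when run_sequential() calls sys.stdout.flush(). run.py itself is not part of this patch, so the following is only a sketch of the presumed fix, namely adding the missing module-level import:

    # Presumed fix for the NameError above; src/run/run.py is not included in this patch.
    import sys  # run_sequential() ends with sys.stdout.flush(), which needs this import

    sys.stdout.flush()  # the call at run.py:318 that raised the NameError recorded above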
diff --git a/results/sacred/10gen_protoss/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py b/results/sacred/10gen_protoss/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
new file mode 100644
index 0000000..5393b7f
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+import logging
+import numpy as np
+import torch as th
+
+class Logger:
+ def __init__(self, console_logger):
+ self.console_logger = console_logger
+
+ self.use_tb = False
+ self.use_sacred = False
+ self.use_hdf = False
+
+ self.stats = defaultdict(lambda: [])
+
+ def setup_tb(self, directory_name):
+ # Import here so it doesn't have to be installed if you don't use it
+ from tensorboard_logger import configure, log_value
+ configure(directory_name)
+ self.tb_logger = log_value
+ self.use_tb = True
+
+ def setup_sacred(self, sacred_run_dict):
+ self.sacred_info = sacred_run_dict.info
+ self.use_sacred = True
+
+ def log_stat(self, key, value, t, to_sacred=True):
+ self.stats[key].append((t, value))
+
+ if self.use_tb:
+ self.tb_logger(key, value, t)
+
+ if self.use_sacred and to_sacred:
+ if key in self.sacred_info:
+ self.sacred_info["{}_T".format(key)].append(t)
+ self.sacred_info[key].append(value)
+ else:
+ self.sacred_info["{}_T".format(key)] = [t]
+ self.sacred_info[key] = [value]
+
+ def print_recent_stats(self):
+ log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
+ i = 0
+ for (k, v) in sorted(self.stats.items()):
+ if k == "episode":
+ continue
+ i += 1
+ window = 5 if k != "epsilon" else 1
+ item = "{:.4f}".format(th.mean(th.tensor([float(x[1]) for x in self.stats[k][-window:]])))
+ log_str += "{:<25}{:>8}".format(k + ":", item)
+ log_str += "\n" if i % 4 == 0 else "\t"
+ self.console_logger.info(log_str)
+ # Reset stats to avoid accumulating logs in memory
+ self.stats = defaultdict(lambda: [])
+
+
+# set up a custom logger
+def get_logger():
+ logger = logging.getLogger()
+ logger.handlers = []
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel('DEBUG')
+
+ return logger
+
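The Logger class above is what produces the paired <key> / <key>_T arrays (battle_won_mean, battle_won_mean_T, and so on) seen in the info.json files elsewhere in this diff: log_stat() appends the value under the key and the environment timestep under key + "_T" in the sacred run's info dict, which the FileStorageObserver then serialises to info.json. A minimal usage sketch follows, assuming it is run from the src/ directory so that utils.logging resolves; FakeRun is a hypothetical stand-in for a sacred Run object, of which only the .info attribute is used:

    from utils.logging import Logger, get_logger  # the module added above

    class FakeRun:
        # stand-in for a sacred Run; Logger.setup_sacred() only reads .info
        info = {}

    logger = Logger(get_logger())
    logger.setup_sacred(FakeRun())
    logger.log_stat("battle_won_mean", 0.0, t=204)
    logger.log_stat("battle_won_mean", 0.5, t=10383)
    # FakeRun.info is now {'battle_won_mean_T': [204, 10383], 'battle_won_mean': [0.0, 0.5]},
    # the same shape as the arrays stored in the info.json files of this diff.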
diff --git a/results/sacred/10gen_protoss/feudal/_sources/main_654daaa6534bcee62784d639ea63e51d.py b/results/sacred/10gen_protoss/feudal/_sources/main_654daaa6534bcee62784d639ea63e51d.py
new file mode 100644
index 0000000..199f3a0
--- /dev/null
+++ b/results/sacred/10gen_protoss/feudal/_sources/main_654daaa6534bcee62784d639ea63e51d.py
@@ -0,0 +1,124 @@
+import random
+
+import numpy as np
+import os
+import collections
+from os.path import dirname, abspath, join
+from copy import deepcopy
+from sacred import Experiment, SETTINGS
+from sacred.observers import FileStorageObserver
+from sacred.utils import apply_backspaces_and_linefeeds
+import sys
+import torch as th
+from utils.logging import get_logger
+import yaml
+import collections.abc
+
+from run import REGISTRY as run_REGISTRY
+
+SETTINGS['CAPTURE_MODE'] = "no"  # "fd" lets sacred capture stdout/stderr; "no" keeps them visible in the console
+logger = get_logger()
+
+ex = Experiment("pymarl")
+ex.logger = logger
+ex.captured_out_filter = apply_backspaces_and_linefeeds
+
+results_path = dirname(dirname(abspath(__file__)))
+
+
+@ex.main
+def my_main(_run, _config, _log):
+ # Setting the random seed throughout the modules
+ config = config_copy(_config)
+ random.seed(config["seed"])
+ np.random.seed(config["seed"])
+ th.manual_seed(config["seed"])
+ th.cuda.manual_seed(config["seed"])
+ # th.cuda.manual_seed_all(config["seed"])
+ th.backends.cudnn.deterministic = True # cudnn
+
+
+ config['env_args']['seed'] = config["seed"]
+
+ # run
+ run_REGISTRY[_config['run']](_run, config, _log)
+
+
+def _get_config(params, arg_name, subfolder):
+ config_name = None
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0] == arg_name:
+ config_name = _v.split("=")[1]
+ del params[_i]
+ break
+
+ if config_name is not None:
+ with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)),
+ "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "{}.yaml error: {}".format(config_name, exc)
+ return config_dict
+
+
+def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, collections.abc.Mapping):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+
+def config_copy(config):
+ if isinstance(config, dict):
+ return {k: config_copy(v) for k, v in config.items()}
+ elif isinstance(config, list):
+ return [config_copy(v) for v in config]
+ else:
+ return deepcopy(config)
+
+
+def parse_command(params, key, default):
+ result = default
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0].strip() == key:
+ result = _v[_v.index('=') + 1:].strip()
+ break
+ return result
+
+
+if __name__ == '__main__':
+ params = deepcopy(sys.argv)
+
+ # Get the defaults from default.yaml
+ with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "default.yaml error: {}".format(exc)
+
+ # Load algorithm and env base configs
+ env_config = _get_config(params, "--env-config", "envs")
+ alg_config = _get_config(params, "--config", "algs")
+ # config_dict = {**config_dict, **env_config, **alg_config}
+ config_dict = recursive_dict_update(config_dict, env_config)
+ config_dict = recursive_dict_update(config_dict, alg_config)
+
+ # now add all the config to sacred
+ ex.add_config(config_dict)
+
+ # Save to disk by default for sacred
+ map_name = parse_command(params, "env_args.map_name", config_dict['env_args']['map_name'])
+ algo_name = parse_command(params, "name", config_dict['name'])
+ local_results_path = parse_command(params, "local_results_path", config_dict['local_results_path'])
+ file_obs_path = join(results_path, local_results_path, "sacred", map_name, algo_name)
+
+ logger.info("Saving to FileStorageObserver in {}.".format(file_obs_path))
+ ex.observers.append(FileStorageObserver.create(file_obs_path))
+
+ ex.run_commandline(params)
+
+ # flush
+ sys.stdout.flush()
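The meta.options.UPDATE lists recorded in the run.json files of this diff are exactly the key=value tokens that this __main__ block passes to sacred after the "with" keyword, while _get_config() strips the --config=... and --env-config=... tokens beforehand (the concrete YAML names live under src/config/ and are not shown in this patch). As a small illustration, the snippet below restates parse_command() from the file above and applies it to a few of the recorded tokens:

    # Standalone restatement of parse_command() above, applied to tokens taken from
    # the UPDATE lists recorded in the run.json files of this diff.
    def parse_command(params, key, default):
        for token in params:
            if token.split("=")[0].strip() == key:
                return token[token.index("=") + 1:].strip()
        return default

    update = ["obs_agent_id=True", "runner=parallel", "t_max=4050000", "td_lambda=0.6"]
    print(parse_command(update, "td_lambda", "0.4"))  # -> "0.6"
    print(parse_command(update, "name", "qmix"))      # no match, falls back to "qmix"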
diff --git a/results/sacred/10gen_protoss/qmix/1/config.json b/results/sacred/10gen_protoss/qmix/1/config.json
new file mode 100644
index 0000000..5ee7c0e
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/1/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 163470727,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/1/cout.txt b/results/sacred/10gen_protoss/qmix/1/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/1/metrics.json b/results/sacred/10gen_protoss/qmix/1/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/1/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/1/run.json b/results/sacred/10gen_protoss/qmix/1/run.json
new file mode 100644
index 0000000..9db1eaa
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/1/run.json
@@ -0,0 +1,124 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 122, in run\n logger.setup_tb(tb_exp_direc)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\utils\\logging.py\", line 19, in setup_tb\n configure(directory_name)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 296, in configure\n _default_logger = Logger(logdir, flush_secs=flush_secs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 51, in __init__\n self._writer = open(filename, 'wb')\n",
+ "FileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Taiyo\\\\Desktop\\\\SMAC V2\\\\pymarl3\\\\results\\\\tb_logs\\\\sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0\\\\algo=qmix-agent=n_rnn\\\\env_n=4\\\\mixer=qmix\\\\rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k\\\\qmix__2025-01-06_00-15-29\\\\events.out.tfevents.1736093730.Taiyopen'\n"
+ ],
+ "heartbeat": "2025-01-05T16:15:30.002599",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:15:29.896110",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T16:15:30.004651"
+}
\ No newline at end of file
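This qmix run, like the four that follow it, fails inside tensorboard_logger when configure() tries to open its event file under the deeply nested results\tb_logs\... directory; the full event-file path in the traceback is a little over the legacy 260-character Windows MAX_PATH limit, which is a plausible cause (the qmix/6 run, whose config has use_tensorboard set to false, gets past this point and records metrics in its info.json). The helper below is a hypothetical pre-flight guard, not code from the repository, that could be called before logger.setup_tb():

    import os

    def check_tb_logdir(directory_name):
        # Hypothetical guard before Logger.setup_tb(); not part of this patch.
        # Create the deeply nested log directory up front (harmless if the library
        # also creates it internally).
        os.makedirs(directory_name, exist_ok=True)
        # Event-file names look like events.out.tfevents.<timestamp>.<hostname>; the
        # one in the traceback above pushes the full path past 260 characters.
        sample = os.path.join(directory_name, "events.out.tfevents.0000000000.host")
        if os.name == "nt" and len(os.path.abspath(sample)) >= 260:
            raise OSError("TensorBoard log path too long for Windows: shorten "
                          "local_results_path or enable long-path support.")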
diff --git a/results/sacred/10gen_protoss/qmix/2/config.json b/results/sacred/10gen_protoss/qmix/2/config.json
new file mode 100644
index 0000000..e979462
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/2/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 790531776,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/2/cout.txt b/results/sacred/10gen_protoss/qmix/2/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/2/metrics.json b/results/sacred/10gen_protoss/qmix/2/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/2/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/2/run.json b/results/sacred/10gen_protoss/qmix/2/run.json
new file mode 100644
index 0000000..a21a951
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/2/run.json
@@ -0,0 +1,124 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 122, in run\n logger.setup_tb(tb_exp_direc)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\utils\\logging.py\", line 19, in setup_tb\n configure(directory_name)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 296, in configure\n _default_logger = Logger(logdir, flush_secs=flush_secs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 51, in __init__\n self._writer = open(filename, 'wb')\n",
+ "FileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Taiyo\\\\Desktop\\\\SMAC V2\\\\pymarl3\\\\results\\\\tb_logs\\\\sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0\\\\algo=qmix-agent=n_rnn\\\\env_n=4\\\\mixer=qmix\\\\rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k\\\\qmix__2025-01-06_00-15-40\\\\events.out.tfevents.1736093740.Taiyopen'\n"
+ ],
+ "heartbeat": "2025-01-05T16:15:40.344931",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:15:40.272388",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T16:15:40.346943"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/3/config.json b/results/sacred/10gen_protoss/qmix/3/config.json
new file mode 100644
index 0000000..980c7f2
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/3/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 788463946,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/3/cout.txt b/results/sacred/10gen_protoss/qmix/3/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/3/metrics.json b/results/sacred/10gen_protoss/qmix/3/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/3/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/3/run.json b/results/sacred/10gen_protoss/qmix/3/run.json
new file mode 100644
index 0000000..6408a42
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/3/run.json
@@ -0,0 +1,124 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 122, in run\n logger.setup_tb(tb_exp_direc)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\utils\\logging.py\", line 19, in setup_tb\n configure(directory_name)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 296, in configure\n _default_logger = Logger(logdir, flush_secs=flush_secs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 51, in __init__\n self._writer = open(filename, 'wb')\n",
+ "FileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Taiyo\\\\Desktop\\\\SMAC V2\\\\pymarl3\\\\results\\\\tb_logs\\\\sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0\\\\algo=qmix-agent=n_rnn\\\\env_n=4\\\\mixer=qmix\\\\rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k\\\\qmix__2025-01-06_00-16-48\\\\events.out.tfevents.1736093809.Taiyopen'\n"
+ ],
+ "heartbeat": "2025-01-05T16:16:49.016218",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:16:48.931426",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T16:16:49.018225"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/4/config.json b/results/sacred/10gen_protoss/qmix/4/config.json
new file mode 100644
index 0000000..7566520
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/4/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 727581499,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/4/cout.txt b/results/sacred/10gen_protoss/qmix/4/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/4/metrics.json b/results/sacred/10gen_protoss/qmix/4/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/4/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/4/run.json b/results/sacred/10gen_protoss/qmix/4/run.json
new file mode 100644
index 0000000..3e1c753
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/4/run.json
@@ -0,0 +1,124 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 122, in run\n logger.setup_tb(tb_exp_direc)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\utils\\logging.py\", line 19, in setup_tb\n configure(directory_name)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 296, in configure\n _default_logger = Logger(logdir, flush_secs=flush_secs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 51, in __init__\n self._writer = open(filename, 'wb')\n",
+ "FileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Taiyo\\\\Desktop\\\\SMAC V2\\\\pymarl3\\\\results\\\\tb_logs\\\\sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0\\\\algo=qmix-agent=n_rnn\\\\env_n=4\\\\mixer=qmix\\\\rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k\\\\qmix__2025-01-06_00-23-20\\\\events.out.tfevents.1736094200.Taiyopen'\n"
+ ],
+ "heartbeat": "2025-01-05T16:23:20.246737",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:23:20.174349",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T16:23:20.249243"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/5/config.json b/results/sacred/10gen_protoss/qmix/5/config.json
new file mode 100644
index 0000000..e88e7fc
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/5/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 148457271,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/5/cout.txt b/results/sacred/10gen_protoss/qmix/5/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/5/metrics.json b/results/sacred/10gen_protoss/qmix/5/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/5/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/5/run.json b/results/sacred/10gen_protoss/qmix/5/run.json
new file mode 100644
index 0000000..b5abe76
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/5/run.json
@@ -0,0 +1,126 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 122, in run\n logger.setup_tb(tb_exp_direc)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\utils\\logging.py\", line 19, in setup_tb\n configure(directory_name)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 296, in configure\n _default_logger = Logger(logdir, flush_secs=flush_secs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\tensorboard_logger\\tensorboard_logger.py\", line 51, in __init__\n self._writer = open(filename, 'wb')\n",
+ "FileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Taiyo\\\\Desktop\\\\SMAC V2\\\\pymarl3\\\\results\\\\tb_logs\\\\sc2_v2_10gen_protoss-obs_aid=1-obs_act=0-conic_fov=0\\\\algo=qmix-agent=n_rnn\\\\env_n=4\\\\mixer=qmix\\\\rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k\\\\qmix__2025-01-06_00-23-53\\\\events.out.tfevents.1736094233.Taiyopen'\n"
+ ],
+ "heartbeat": "2025-01-05T16:23:53.621785",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:23:53.552221",
+ "status": "FAILED",
+ "stop_time": "2025-01-05T16:23:53.623909"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/6/config.json b/results/sacred/10gen_protoss/qmix/6/config.json
new file mode 100644
index 0000000..47acf21
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/6/config.json
@@ -0,0 +1,130 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2_v2",
+ "env_args": {
+ "capability_config": {
+ "n_units": 5,
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "map_x": 32,
+ "map_y": 32,
+ "n_enemies": 5,
+ "p": 0.5
+ },
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "exception_unit_types": [
+ "colossus"
+ ],
+ "observe": true,
+ "unit_types": [
+ "stalker",
+ "zealot",
+ "colossus"
+ ],
+ "weights": [
+ 0.45,
+ 0.45,
+ 0.1
+ ]
+ }
+ },
+ "change_fov_with_move": false,
+ "conic_fov": false,
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "fully_observable": false,
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "kill_unit_step_mul": 2,
+ "map_name": "10gen_protoss",
+ "move_amount": 2,
+ "num_fov_actions": 12,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_own_pos": true,
+ "obs_pathing_grid": false,
+ "obs_starcraft": true,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 112463159,
+ "t_max": 4050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": false
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/6/cout.txt b/results/sacred/10gen_protoss/qmix/6/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/10gen_protoss/qmix/6/info.json b/results/sacred/10gen_protoss/qmix/6/info.json
new file mode 100644
index 0000000..f12cda6
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/6/info.json
@@ -0,0 +1,26326 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.010869565217391304,
+ 0.02717391304347826,
+ 0.02717391304347826,
+ 0.07608695652173914,
+ 0.09239130434782608,
+ 0.10638297872340426,
+ 0.09444444444444444,
+ 0.11931818181818182,
+ 0.15217391304347827,
+ 0.11956521739130435,
+ 0.17222222222222222,
+ 0.2,
+ 0.17613636363636365,
+ 0.1793478260869565,
+ 0.10326086956521739,
+ 0.2159090909090909,
+ 0.13297872340425532,
+ 0.15555555555555556,
+ 0.21022727272727273,
+ 0.2111111111111111,
+ 0.21666666666666667,
+ 0.18888888888888888,
+ 0.18333333333333332,
+ 0.25,
+ 0.2784090909090909,
+ 0.30113636363636365,
+ 0.27325581395348836,
+ 0.22777777777777777,
+ 0.22727272727272727,
+ 0.20348837209302326,
+ 0.25,
+ 0.22674418604651161,
+ 0.23295454545454544,
+ 0.3081395348837209,
+ 0.2556818181818182,
+ 0.25595238095238093,
+ 0.22674418604651161,
+ 0.26785714285714285,
+ 0.29878048780487804,
+ 0.22674418604651161,
+ 0.2804878048780488,
+ 0.25,
+ 0.2976190476190476,
+ 0.30357142857142855,
+ 0.32142857142857145,
+ 0.2619047619047619,
+ 0.36627906976744184,
+ 0.30952380952380953,
+ 0.3333333333333333,
+ 0.28488372093023256,
+ 0.3333333333333333,
+ 0.2976190476190476,
+ 0.32926829268292684,
+ 0.3430232558139535,
+ 0.2926829268292683,
+ 0.39375,
+ 0.31097560975609756,
+ 0.32926829268292684,
+ 0.34523809523809523,
+ 0.36627906976744184,
+ 0.3475609756097561,
+ 0.3333333333333333,
+ 0.3170731707317073,
+ 0.34523809523809523,
+ 0.39634146341463417,
+ 0.35365853658536583,
+ 0.3597560975609756,
+ 0.375,
+ 0.34146341463414637,
+ 0.2682926829268293,
+ 0.3273809523809524,
+ 0.3719512195121951,
+ 0.34146341463414637,
+ 0.34523809523809523,
+ 0.2976190476190476,
+ 0.3719512195121951,
+ 0.3902439024390244,
+ 0.375,
+ 0.375,
+ 0.375,
+ 0.3231707317073171,
+ 0.3780487804878049,
+ 0.3869047619047619,
+ 0.40625,
+ 0.49390243902439024,
+ 0.3974358974358974,
+ 0.43902439024390244,
+ 0.425,
+ 0.3625,
+ 0.4166666666666667,
+ 0.358974358974359,
+ 0.3719512195121951,
+ 0.3875,
+ 0.48125,
+ 0.4375,
+ 0.5192307692307693,
+ 0.54375,
+ 0.44375,
+ 0.40625,
+ 0.4551282051282051,
+ 0.4875,
+ 0.4125,
+ 0.5125,
+ 0.4375,
+ 0.47435897435897434,
+ 0.46794871794871795,
+ 0.5,
+ 0.49375,
+ 0.5256410256410257,
+ 0.5576923076923077,
+ 0.5576923076923077,
+ 0.475,
+ 0.4551282051282051,
+ 0.4230769230769231,
+ 0.5128205128205128,
+ 0.4551282051282051,
+ 0.47435897435897434,
+ 0.5125,
+ 0.4342105263157895,
+ 0.46153846153846156,
+ 0.46794871794871795,
+ 0.4276315789473684,
+ 0.506578947368421,
+ 0.54375,
+ 0.4423076923076923,
+ 0.5,
+ 0.5128205128205128,
+ 0.5128205128205128,
+ 0.5,
+ 0.4935897435897436,
+ 0.54375,
+ 0.46710526315789475,
+ 0.50625,
+ 0.4868421052631579,
+ 0.4423076923076923,
+ 0.4807692307692308,
+ 0.5448717948717948,
+ 0.5448717948717948,
+ 0.5705128205128205,
+ 0.5394736842105263,
+ 0.506578947368421,
+ 0.48026315789473684,
+ 0.5263157894736842,
+ 0.5921052631578947,
+ 0.5448717948717948,
+ 0.5192307692307693,
+ 0.5192307692307693,
+ 0.581081081081081,
+ 0.47368421052631576,
+ 0.5064102564102564,
+ 0.532051282051282,
+ 0.55625,
+ 0.532051282051282,
+ 0.5789473684210527,
+ 0.5641025641025641,
+ 0.48026315789473684,
+ 0.5460526315789473,
+ 0.5897435897435898,
+ 0.5394736842105263,
+ 0.5328947368421053,
+ 0.5897435897435898,
+ 0.5405405405405406,
+ 0.6052631578947368,
+ 0.5855263157894737,
+ 0.5472972972972973,
+ 0.6025641025641025,
+ 0.5986842105263158,
+ 0.5328947368421053,
+ 0.6710526315789473,
+ 0.5705128205128205,
+ 0.5769230769230769,
+ 0.625,
+ 0.5657894736842105,
+ 0.5064102564102564,
+ 0.6118421052631579,
+ 0.625,
+ 0.5986842105263158,
+ 0.532051282051282,
+ 0.5460526315789473,
+ 0.5460526315789473,
+ 0.5986842105263158,
+ 0.532051282051282,
+ 0.5986842105263158,
+ 0.5657894736842105,
+ 0.6447368421052632,
+ 0.5833333333333334,
+ 0.631578947368421,
+ 0.618421052631579,
+ 0.5197368421052632,
+ 0.5384615384615384,
+ 0.5328947368421053,
+ 0.5448717948717948,
+ 0.6410256410256411,
+ 0.5789473684210527,
+ 0.5128205128205128,
+ 0.5833333333333334,
+ 0.5769230769230769,
+ 0.6081081081081081,
+ 0.5256410256410257,
+ 0.5592105263157895,
+ 0.5641025641025641,
+ 0.5789473684210527
+ ],
+ "battle_won_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 4.978260869565218,
+ 4.940217391304348,
+ 4.951086956521739,
+ 4.820652173913044,
+ 4.815217391304348,
+ 4.76063829787234,
+ 4.822222222222222,
+ 4.7272727272727275,
+ 4.630434782608695,
+ 4.711956521739131,
+ 4.633333333333334,
+ 4.555555555555555,
+ 4.6022727272727275,
+ 4.570652173913044,
+ 4.728260869565218,
+ 4.482954545454546,
+ 4.691489361702128,
+ 4.544444444444444,
+ 4.568181818181818,
+ 4.5055555555555555,
+ 4.483333333333333,
+ 4.533333333333333,
+ 4.538888888888889,
+ 4.4222222222222225,
+ 4.340909090909091,
+ 4.284090909090909,
+ 4.3604651162790695,
+ 4.455555555555556,
+ 4.4772727272727275,
+ 4.482558139534884,
+ 4.363636363636363,
+ 4.488372093023256,
+ 4.431818181818182,
+ 4.27906976744186,
+ 4.346590909090909,
+ 4.464285714285714,
+ 4.453488372093023,
+ 4.369047619047619,
+ 4.359756097560975,
+ 4.436046511627907,
+ 4.353658536585366,
+ 4.487804878048781,
+ 4.285714285714286,
+ 4.315476190476191,
+ 4.333333333333333,
+ 4.410714285714286,
+ 4.209302325581396,
+ 4.285714285714286,
+ 4.220238095238095,
+ 4.401162790697675,
+ 4.238095238095238,
+ 4.273809523809524,
+ 4.189024390243903,
+ 4.145348837209302,
+ 4.323170731707317,
+ 4.06875,
+ 4.347560975609756,
+ 4.2317073170731705,
+ 4.208333333333333,
+ 4.075581395348837,
+ 4.079268292682927,
+ 4.184523809523809,
+ 4.329268292682927,
+ 4.238095238095238,
+ 4.134146341463414,
+ 4.158536585365853,
+ 4.189024390243903,
+ 4.065476190476191,
+ 4.2073170731707314,
+ 4.384146341463414,
+ 4.238095238095238,
+ 4.097560975609756,
+ 4.121951219512195,
+ 4.226190476190476,
+ 4.273809523809524,
+ 4.097560975609756,
+ 4.128048780487805,
+ 4.154761904761905,
+ 4.083333333333333,
+ 4.1375,
+ 4.304878048780488,
+ 4.085365853658536,
+ 3.988095238095238,
+ 4.08125,
+ 3.8780487804878048,
+ 4.108974358974359,
+ 3.9146341463414633,
+ 4.0375,
+ 4.15625,
+ 4.051282051282051,
+ 4.211538461538462,
+ 4.091463414634147,
+ 4.18125,
+ 3.89375,
+ 4.0125,
+ 3.782051282051282,
+ 3.65625,
+ 3.89375,
+ 3.9875,
+ 4.038461538461538,
+ 3.88125,
+ 4.0375,
+ 3.79375,
+ 3.875,
+ 3.91025641025641,
+ 3.9551282051282053,
+ 3.86875,
+ 3.84375,
+ 3.826923076923077,
+ 3.7435897435897436,
+ 3.7948717948717947,
+ 3.95625,
+ 3.967948717948718,
+ 4.089743589743589,
+ 3.826923076923077,
+ 3.9615384615384617,
+ 3.9038461538461537,
+ 3.825,
+ 4.0855263157894735,
+ 3.9551282051282053,
+ 3.9743589743589745,
+ 4.0394736842105265,
+ 3.710526315789474,
+ 3.69375,
+ 3.871794871794872,
+ 3.7948717948717947,
+ 3.8333333333333335,
+ 3.8525641025641026,
+ 3.83125,
+ 3.7756410256410255,
+ 3.6375,
+ 3.861842105263158,
+ 3.81875,
+ 3.8815789473684212,
+ 4.012820512820513,
+ 3.8205128205128207,
+ 3.75,
+ 3.7115384615384617,
+ 3.6153846153846154,
+ 3.6710526315789473,
+ 3.7960526315789473,
+ 3.9210526315789473,
+ 3.8421052631578947,
+ 3.6578947368421053,
+ 3.730769230769231,
+ 3.801282051282051,
+ 3.7115384615384617,
+ 3.7364864864864864,
+ 3.9539473684210527,
+ 3.8525641025641026,
+ 3.7243589743589745,
+ 3.6875,
+ 3.8205128205128207,
+ 3.625,
+ 3.5705128205128207,
+ 3.9473684210526314,
+ 3.789473684210526,
+ 3.5641025641025643,
+ 3.6578947368421053,
+ 3.75,
+ 3.673076923076923,
+ 3.72972972972973,
+ 3.5065789473684212,
+ 3.598684210526316,
+ 3.7567567567567566,
+ 3.519230769230769,
+ 3.664473684210526,
+ 3.6447368421052633,
+ 3.3815789473684212,
+ 3.5961538461538463,
+ 3.641025641025641,
+ 3.6315789473684212,
+ 3.598684210526316,
+ 3.7628205128205128,
+ 3.5723684210526314,
+ 3.539473684210526,
+ 3.638157894736842,
+ 3.7051282051282053,
+ 3.75,
+ 3.7828947368421053,
+ 3.5723684210526314,
+ 3.782051282051282,
+ 3.5657894736842106,
+ 3.6710526315789473,
+ 3.4934210526315788,
+ 3.5705128205128207,
+ 3.486842105263158,
+ 3.486842105263158,
+ 3.7697368421052633,
+ 3.673076923076923,
+ 3.6907894736842106,
+ 3.6474358974358974,
+ 3.4615384615384617,
+ 3.651315789473684,
+ 3.8141025641025643,
+ 3.6025641025641026,
+ 3.7243589743589745,
+ 3.635135135135135,
+ 3.7115384615384617,
+ 3.7039473684210527,
+ 3.6666666666666665,
+ 3.6907894736842106
+ ],
+ "dead_allies_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "dead_enemies_mean": [
+ 0.25,
+ 0.33695652173913043,
+ 0.4673913043478261,
+ 0.5543478260869565,
+ 0.6808510638297872,
+ 1.1141304347826086,
+ 1.1684782608695652,
+ 1.4021739130434783,
+ 1.6358695652173914,
+ 1.7554347826086956,
+ 2.0,
+ 2.011111111111111,
+ 2.25,
+ 2.2717391304347827,
+ 2.1956521739130435,
+ 2.3833333333333333,
+ 2.4944444444444445,
+ 2.4545454545454546,
+ 2.505434782608696,
+ 2.2010869565217392,
+ 2.6988636363636362,
+ 2.127659574468085,
+ 2.5055555555555555,
+ 2.6420454545454546,
+ 2.7,
+ 2.6222222222222222,
+ 2.5277777777777777,
+ 2.5,
+ 2.7555555555555555,
+ 2.852272727272727,
+ 2.9886363636363638,
+ 3.0174418604651163,
+ 2.7555555555555555,
+ 2.710227272727273,
+ 2.86046511627907,
+ 2.8636363636363638,
+ 2.9709302325581395,
+ 2.7954545454545454,
+ 3.13953488372093,
+ 2.897727272727273,
+ 3.0952380952380953,
+ 2.88953488372093,
+ 3.107142857142857,
+ 3.2134146341463414,
+ 2.895348837209302,
+ 3.268292682926829,
+ 3.1158536585365852,
+ 3.3095238095238093,
+ 3.2916666666666665,
+ 3.1904761904761907,
+ 3.267857142857143,
+ 3.38953488372093,
+ 3.1904761904761907,
+ 3.4047619047619047,
+ 3.0930232558139537,
+ 3.380952380952381,
+ 3.2738095238095237,
+ 3.3963414634146343,
+ 3.2674418604651163,
+ 3.3109756097560976,
+ 3.51875,
+ 3.3353658536585367,
+ 3.518292682926829,
+ 3.4285714285714284,
+ 3.4593023255813953,
+ 3.317073170731707,
+ 3.3333333333333335,
+ 3.3841463414634148,
+ 3.380952380952381,
+ 3.5609756097560976,
+ 3.5792682926829267,
+ 3.4146341463414633,
+ 3.4464285714285716,
+ 3.5304878048780486,
+ 3.3048780487804876,
+ 3.386904761904762,
+ 3.5,
+ 3.4695121951219514,
+ 3.3988095238095237,
+ 3.392857142857143,
+ 3.542682926829268,
+ 3.5121951219512195,
+ 3.488095238095238,
+ 3.5595238095238093,
+ 3.6625,
+ 3.475609756097561,
+ 3.5792682926829267,
+ 3.5654761904761907,
+ 3.7,
+ 3.7560975609756095,
+ 3.7435897435897436,
+ 3.707317073170732,
+ 3.8875,
+ 3.6625,
+ 3.7756410256410255,
+ 3.628205128205128,
+ 3.6646341463414633,
+ 3.75,
+ 3.825,
+ 3.80625,
+ 3.9551282051282053,
+ 3.9625,
+ 3.79375,
+ 3.71875,
+ 3.8525641025641026,
+ 3.89375,
+ 3.81875,
+ 4.03125,
+ 3.7625,
+ 3.8525641025641026,
+ 3.9358974358974357,
+ 3.9125,
+ 3.94375,
+ 4.102564102564102,
+ 4.147435897435898,
+ 4.141025641025641,
+ 3.9,
+ 3.8205128205128207,
+ 3.858974358974359,
+ 3.9615384615384617,
+ 3.9423076923076925,
+ 3.9615384615384617,
+ 3.95625,
+ 3.914473684210526,
+ 3.8205128205128207,
+ 3.801282051282051,
+ 3.8815789473684212,
+ 4.032894736842105,
+ 4.075,
+ 3.7243589743589745,
+ 4.032051282051282,
+ 4.006410256410256,
+ 3.9743589743589745,
+ 4.01875,
+ 4.064102564102564,
+ 4.0375,
+ 3.9078947368421053,
+ 3.9875,
+ 3.9802631578947367,
+ 3.7884615384615383,
+ 3.8846153846153846,
+ 4.07051282051282,
+ 4.0576923076923075,
+ 4.089743589743589,
+ 4.052631578947368,
+ 3.9342105263157894,
+ 3.9671052631578947,
+ 4.059210526315789,
+ 4.171052631578948,
+ 4.051282051282051,
+ 3.967948717948718,
+ 4.038461538461538,
+ 4.155405405405405,
+ 3.9407894736842106,
+ 3.980769230769231,
+ 4.064102564102564,
+ 4.01875,
+ 4.0,
+ 4.197368421052632,
+ 4.166666666666667,
+ 4.065789473684211,
+ 4.072368421052632,
+ 4.237179487179487,
+ 4.1381578947368425,
+ 4.131578947368421,
+ 4.217948717948718,
+ 4.155405405405405,
+ 4.223684210526316,
+ 4.125,
+ 4.202702702702703,
+ 4.147435897435898,
+ 4.269736842105263,
+ 4.059210526315789,
+ 4.269736842105263,
+ 4.089743589743589,
+ 4.083333333333333,
+ 4.2894736842105265,
+ 4.118421052631579,
+ 3.9615384615384617,
+ 4.2894736842105265,
+ 4.302631578947368,
+ 4.177631578947368,
+ 4.083333333333333,
+ 4.098684210526316,
+ 4.072368421052632,
+ 4.223684210526316,
+ 4.006410256410256,
+ 4.203947368421052,
+ 4.197368421052632,
+ 4.355263157894737,
+ 4.128205128205129,
+ 4.276315789473684,
+ 4.243421052631579,
+ 3.9407894736842106,
+ 4.07051282051282,
+ 4.0394736842105265,
+ 4.064102564102564,
+ 4.301282051282051,
+ 4.151315789473684,
+ 3.891025641025641,
+ 4.17948717948718,
+ 4.102564102564102,
+ 4.243243243243243,
+ 4.012820512820513,
+ 4.2105263157894735,
+ 4.115384615384615,
+ 4.203947368421052
+ ],
+ "dead_enemies_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "ep_length_mean": [
+ 51.0,
+ 55.32065217391305,
+ 54.97826086956522,
+ 54.59239130434783,
+ 54.191489361702125,
+ 55.41304347826087,
+ 55.22826086956522,
+ 54.66847826086956,
+ 55.42934782608695,
+ 54.84239130434783,
+ 54.015957446808514,
+ 56.65,
+ 57.20454545454545,
+ 55.17934782608695,
+ 55.32608695652174,
+ 56.34444444444444,
+ 56.13333333333333,
+ 57.78409090909091,
+ 55.35326086956522,
+ 54.94565217391305,
+ 57.39204545454545,
+ 54.234042553191486,
+ 55.93333333333333,
+ 57.76704545454545,
+ 56.13333333333333,
+ 56.605555555555554,
+ 56.17777777777778,
+ 55.90555555555556,
+ 56.56666666666667,
+ 58.21590909090909,
+ 57.60227272727273,
+ 58.76744186046512,
+ 56.32222222222222,
+ 57.46590909090909,
+ 58.98255813953488,
+ 57.51136363636363,
+ 58.50581395348837,
+ 57.15340909090909,
+ 59.49418604651163,
+ 56.98295454545455,
+ 60.25595238095238,
+ 58.901162790697676,
+ 60.529761904761905,
+ 61.71341463414634,
+ 58.25581395348837,
+ 61.23170731707317,
+ 61.28658536585366,
+ 59.80952380952381,
+ 60.92857142857143,
+ 60.51190476190476,
+ 60.67857142857143,
+ 58.598837209302324,
+ 59.57738095238095,
+ 60.875,
+ 59.593023255813954,
+ 60.791666666666664,
+ 59.916666666666664,
+ 61.65853658536585,
+ 59.47674418604651,
+ 61.91463414634146,
+ 62.70625,
+ 61.08536585365854,
+ 61.61585365853659,
+ 61.285714285714285,
+ 59.21511627906977,
+ 61.81707317073171,
+ 60.05357142857143,
+ 60.98170731707317,
+ 60.267857142857146,
+ 61.170731707317074,
+ 61.28048780487805,
+ 61.109756097560975,
+ 60.81547619047619,
+ 61.61585365853659,
+ 61.99390243902439,
+ 60.148809523809526,
+ 61.40853658536585,
+ 61.50609756097561,
+ 60.785714285714285,
+ 60.70238095238095,
+ 61.11585365853659,
+ 61.28048780487805,
+ 61.11309523809524,
+ 60.654761904761905,
+ 62.85625,
+ 61.61585365853659,
+ 61.90853658536585,
+ 60.32142857142857,
+ 63.2125,
+ 61.603658536585364,
+ 64.3525641025641,
+ 61.84146341463415,
+ 63.3625,
+ 62.50625,
+ 64.58974358974359,
+ 64.5,
+ 61.73170731707317,
+ 63.7,
+ 63.65,
+ 63.41875,
+ 64.38461538461539,
+ 63.0,
+ 62.65625,
+ 63.875,
+ 64.86538461538461,
+ 62.7375,
+ 63.81875,
+ 63.59375,
+ 63.38125,
+ 64.82692307692308,
+ 64.55128205128206,
+ 63.50625,
+ 64.1625,
+ 65.0576923076923,
+ 64.4551282051282,
+ 65.44871794871794,
+ 64.1,
+ 65.24358974358974,
+ 65.1923076923077,
+ 65.0576923076923,
+ 65.49358974358974,
+ 65.8974358974359,
+ 63.95625,
+ 67.13157894736842,
+ 64.68589743589743,
+ 64.65384615384616,
+ 66.5592105263158,
+ 66.07236842105263,
+ 64.0,
+ 64.27564102564102,
+ 65.8076923076923,
+ 64.78205128205128,
+ 64.17948717948718,
+ 64.025,
+ 64.3076923076923,
+ 63.20625,
+ 66.22368421052632,
+ 63.6125,
+ 66.24342105263158,
+ 64.5,
+ 64.36538461538461,
+ 65.2948717948718,
+ 64.43589743589743,
+ 64.18589743589743,
+ 65.95394736842105,
+ 66.51315789473684,
+ 66.35526315789474,
+ 66.46052631578948,
+ 66.86842105263158,
+ 65.42948717948718,
+ 65.00641025641026,
+ 64.61538461538461,
+ 68.16891891891892,
+ 66.5592105263158,
+ 65.0448717948718,
+ 64.53205128205128,
+ 63.65,
+ 65.48717948717949,
+ 67.29605263157895,
+ 65.12179487179488,
+ 66.5,
+ 67.39473684210526,
+ 65.76923076923077,
+ 65.8157894736842,
+ 66.50657894736842,
+ 65.53846153846153,
+ 67.9054054054054,
+ 65.9342105263158,
+ 66.01973684210526,
+ 67.67567567567568,
+ 65.6025641025641,
+ 66.57894736842105,
+ 67.46710526315789,
+ 66.01973684210526,
+ 65.41025641025641,
+ 65.64102564102564,
+ 67.11184210526316,
+ 65.92763157894737,
+ 64.98717948717949,
+ 66.90131578947368,
+ 65.94736842105263,
+ 66.76315789473684,
+ 65.57051282051282,
+ 66.70394736842105,
+ 66.77631578947368,
+ 66.77631578947368,
+ 65.50641025641026,
+ 67.17763157894737,
+ 66.30263157894737,
+ 66.64473684210526,
+ 65.67307692307692,
+ 66.04605263157895,
+ 67.26315789473684,
+ 67.41447368421052,
+ 65.53846153846153,
+ 66.61184210526316,
+ 64.42948717948718,
+ 65.66666666666667,
+ 66.36842105263158,
+ 64.25641025641026,
+ 65.47435897435898,
+ 65.32692307692308,
+ 67.81756756756756,
+ 65.4423076923077,
+ 66.92105263157895,
+ 65.69871794871794,
+ 66.92105263157895
+ ],
+ "ep_length_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "episode": [
+ 184,
+ 368,
+ 552,
+ 740,
+ 924,
+ 1108,
+ 1292,
+ 1476,
+ 1660,
+ 1848,
+ 2028,
+ 2204,
+ 2388,
+ 2572,
+ 2752,
+ 2932,
+ 3108,
+ 3292,
+ 3476,
+ 3652,
+ 3840,
+ 4020,
+ 4196,
+ 4376,
+ 4556,
+ 4736,
+ 4916,
+ 5096,
+ 5272,
+ 5448,
+ 5620,
+ 5800,
+ 5976,
+ 6148,
+ 6324,
+ 6496,
+ 6672,
+ 6844,
+ 7020,
+ 7188,
+ 7360,
+ 7528,
+ 7692,
+ 7864,
+ 8028,
+ 8192,
+ 8360,
+ 8524,
+ 8692,
+ 8860,
+ 9032,
+ 9200,
+ 9368,
+ 9536,
+ 9700,
+ 9868,
+ 10032,
+ 10204,
+ 10368,
+ 10528,
+ 10696,
+ 10860,
+ 11024,
+ 11196,
+ 11360,
+ 11528,
+ 11696,
+ 11864,
+ 12028,
+ 12192,
+ 12360,
+ 12528,
+ 12692,
+ 12856,
+ 13024,
+ 13188,
+ 13352,
+ 13520,
+ 13688,
+ 13852,
+ 14016,
+ 14184,
+ 14352,
+ 14512,
+ 14676,
+ 14836,
+ 15004,
+ 15164,
+ 15328,
+ 15484,
+ 15648,
+ 15808,
+ 15968,
+ 16124,
+ 16284,
+ 16448,
+ 16608,
+ 16768,
+ 16928,
+ 17084,
+ 17244,
+ 17404,
+ 17564,
+ 17720,
+ 17880,
+ 18040,
+ 18200,
+ 18360,
+ 18516,
+ 18672,
+ 18832,
+ 18992,
+ 19148,
+ 19304,
+ 19460,
+ 19616,
+ 19772,
+ 19928,
+ 20084,
+ 20240,
+ 20392,
+ 20552,
+ 20704,
+ 20860,
+ 21016,
+ 21168,
+ 21320,
+ 21476,
+ 21632,
+ 21788,
+ 21944,
+ 22100,
+ 22260,
+ 22416,
+ 22576,
+ 22728,
+ 22888,
+ 23040,
+ 23196,
+ 23352,
+ 23508,
+ 23664,
+ 23824,
+ 23980,
+ 24132,
+ 24284,
+ 24436,
+ 24588,
+ 24744,
+ 24900,
+ 25056,
+ 25204,
+ 25356,
+ 25512,
+ 25668,
+ 25828,
+ 25980,
+ 26132,
+ 26288,
+ 26440,
+ 26592,
+ 26744,
+ 26900,
+ 27052,
+ 27204,
+ 27356,
+ 27508,
+ 27664,
+ 27816,
+ 27972,
+ 28124,
+ 28276,
+ 28428,
+ 28580,
+ 28736,
+ 28884,
+ 29040,
+ 29196,
+ 29348,
+ 29500,
+ 29652,
+ 29808,
+ 29960,
+ 30112,
+ 30264,
+ 30420,
+ 30572,
+ 30724,
+ 30876,
+ 31032,
+ 31184,
+ 31336,
+ 31488,
+ 31640,
+ 31792,
+ 31948,
+ 32100,
+ 32252,
+ 32412,
+ 32564,
+ 32720,
+ 32872,
+ 33028,
+ 33180,
+ 33336,
+ 33488
+ ],
+ "episode_T": [
+ 10134,
+ 20279,
+ 30335,
+ 40533,
+ 50696,
+ 60857,
+ 70955,
+ 81136,
+ 91204,
+ 101375,
+ 111572,
+ 121657,
+ 131769,
+ 141974,
+ 152110,
+ 162231,
+ 172364,
+ 182569,
+ 192716,
+ 202781,
+ 212980,
+ 223054,
+ 233194,
+ 243350,
+ 253519,
+ 263608,
+ 273719,
+ 283841,
+ 294060,
+ 304221,
+ 314359,
+ 324477,
+ 334587,
+ 344723,
+ 354831,
+ 364881,
+ 374997,
+ 385191,
+ 395234,
+ 405369,
+ 415477,
+ 425693,
+ 435779,
+ 445793,
+ 455864,
+ 465899,
+ 475923,
+ 485978,
+ 496168,
+ 506296,
+ 516362,
+ 526412,
+ 536609,
+ 546626,
+ 556646,
+ 566649,
+ 576743,
+ 586889,
+ 597173,
+ 607194,
+ 617420,
+ 627554,
+ 637588,
+ 647740,
+ 657892,
+ 668016,
+ 678213,
+ 688354,
+ 698386,
+ 708500,
+ 718695,
+ 728884,
+ 739077,
+ 749235,
+ 759283,
+ 769350,
+ 779465,
+ 789701,
+ 799907,
+ 809919,
+ 819957,
+ 830177,
+ 840400,
+ 850470,
+ 860509,
+ 870532,
+ 880652,
+ 890761,
+ 900844,
+ 910898,
+ 921025,
+ 931125,
+ 941159,
+ 951284,
+ 961525,
+ 971692,
+ 981861,
+ 992022,
+ 1002145,
+ 1012203,
+ 1022221,
+ 1032328,
+ 1042551,
+ 1052657,
+ 1062692,
+ 1072889,
+ 1083122,
+ 1093238,
+ 1103323,
+ 1113374,
+ 1123581,
+ 1133798,
+ 1143976,
+ 1153996,
+ 1164187,
+ 1174207,
+ 1184415,
+ 1194513,
+ 1204731,
+ 1214828,
+ 1224936,
+ 1235174,
+ 1245375,
+ 1255462,
+ 1265606,
+ 1275704,
+ 1285735,
+ 1295787,
+ 1305891,
+ 1316018,
+ 1326154,
+ 1336171,
+ 1346342,
+ 1356350,
+ 1366510,
+ 1376545,
+ 1386774,
+ 1396793,
+ 1406939,
+ 1416952,
+ 1427149,
+ 1437203,
+ 1447435,
+ 1457694,
+ 1467859,
+ 1477878,
+ 1488024,
+ 1498188,
+ 1508363,
+ 1518526,
+ 1528622,
+ 1538708,
+ 1548781,
+ 1558994,
+ 1569089,
+ 1579216,
+ 1589233,
+ 1599487,
+ 1609591,
+ 1619686,
+ 1629854,
+ 1639995,
+ 1650143,
+ 1660285,
+ 1670305,
+ 1680539,
+ 1690678,
+ 1700885,
+ 1711136,
+ 1721361,
+ 1731481,
+ 1741672,
+ 1751736,
+ 1761767,
+ 1771958,
+ 1781963,
+ 1792105,
+ 1802349,
+ 1812528,
+ 1822564,
+ 1832748,
+ 1842968,
+ 1853017,
+ 1863195,
+ 1873346,
+ 1883568,
+ 1893787,
+ 1903789,
+ 1913983,
+ 1924240,
+ 1934275,
+ 1944467,
+ 1954661,
+ 1964794,
+ 1974823,
+ 1984879,
+ 1994883,
+ 2005035,
+ 2015193,
+ 2025224,
+ 2035413,
+ 2045667,
+ 2055833,
+ 2066024,
+ 2076277,
+ 2086468
+ ],
+ "episode_in_buffer": [
+ 184,
+ 368,
+ 552,
+ 740,
+ 924,
+ 1108,
+ 1292,
+ 1476,
+ 1660,
+ 1848,
+ 2028,
+ 2204,
+ 2388,
+ 2572,
+ 2752,
+ 2932,
+ 3108,
+ 3292,
+ 3476,
+ 3652,
+ 3840,
+ 4020,
+ 4196,
+ 4376,
+ 4556,
+ 4736,
+ 4916,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000,
+ 5000
+ ],
+ "episode_in_buffer_T": [
+ 10134,
+ 20279,
+ 30335,
+ 40533,
+ 50696,
+ 60857,
+ 70955,
+ 81136,
+ 91204,
+ 101375,
+ 111572,
+ 121657,
+ 131769,
+ 141974,
+ 152110,
+ 162231,
+ 172364,
+ 182569,
+ 192716,
+ 202781,
+ 212980,
+ 223054,
+ 233194,
+ 243350,
+ 253519,
+ 263608,
+ 273719,
+ 283841,
+ 294060,
+ 304221,
+ 314359,
+ 324477,
+ 334587,
+ 344723,
+ 354831,
+ 364881,
+ 374997,
+ 385191,
+ 395234,
+ 405369,
+ 415477,
+ 425693,
+ 435779,
+ 445793,
+ 455864,
+ 465899,
+ 475923,
+ 485978,
+ 496168,
+ 506296,
+ 516362,
+ 526412,
+ 536609,
+ 546626,
+ 556646,
+ 566649,
+ 576743,
+ 586889,
+ 597173,
+ 607194,
+ 617420,
+ 627554,
+ 637588,
+ 647740,
+ 657892,
+ 668016,
+ 678213,
+ 688354,
+ 698386,
+ 708500,
+ 718695,
+ 728884,
+ 739077,
+ 749235,
+ 759283,
+ 769350,
+ 779465,
+ 789701,
+ 799907,
+ 809919,
+ 819957,
+ 830177,
+ 840400,
+ 850470,
+ 860509,
+ 870532,
+ 880652,
+ 890761,
+ 900844,
+ 910898,
+ 921025,
+ 931125,
+ 941159,
+ 951284,
+ 961525,
+ 971692,
+ 981861,
+ 992022,
+ 1002145,
+ 1012203,
+ 1022221,
+ 1032328,
+ 1042551,
+ 1052657,
+ 1062692,
+ 1072889,
+ 1083122,
+ 1093238,
+ 1103323,
+ 1113374,
+ 1123581,
+ 1133798,
+ 1143976,
+ 1153996,
+ 1164187,
+ 1174207,
+ 1184415,
+ 1194513,
+ 1204731,
+ 1214828,
+ 1224936,
+ 1235174,
+ 1245375,
+ 1255462,
+ 1265606,
+ 1275704,
+ 1285735,
+ 1295787,
+ 1305891,
+ 1316018,
+ 1326154,
+ 1336171,
+ 1346342,
+ 1356350,
+ 1366510,
+ 1376545,
+ 1386774,
+ 1396793,
+ 1406939,
+ 1416952,
+ 1427149,
+ 1437203,
+ 1447435,
+ 1457694,
+ 1467859,
+ 1477878,
+ 1488024,
+ 1498188,
+ 1508363,
+ 1518526,
+ 1528622,
+ 1538708,
+ 1548781,
+ 1558994,
+ 1569089,
+ 1579216,
+ 1589233,
+ 1599487,
+ 1609591,
+ 1619686,
+ 1629854,
+ 1639995,
+ 1650143,
+ 1660285,
+ 1670305,
+ 1680539,
+ 1690678,
+ 1700885,
+ 1711136,
+ 1721361,
+ 1731481,
+ 1741672,
+ 1751736,
+ 1761767,
+ 1771958,
+ 1781963,
+ 1792105,
+ 1802349,
+ 1812528,
+ 1822564,
+ 1832748,
+ 1842968,
+ 1853017,
+ 1863195,
+ 1873346,
+ 1883568,
+ 1893787,
+ 1903789,
+ 1913983,
+ 1924240,
+ 1934275,
+ 1944467,
+ 1954661,
+ 1964794,
+ 1974823,
+ 1984879,
+ 1994883,
+ 2005035,
+ 2015193,
+ 2025224,
+ 2035413,
+ 2045667,
+ 2055833,
+ 2066024,
+ 2076277,
+ 2086468
+ ],
+ "epsilon": [
+ 1.0,
+ 0.9037270000000001,
+ 0.8073495,
+ 0.7118175,
+ 0.6149365,
+ 0.5183880000000001,
+ 0.4218585,
+ 0.32592750000000004,
+ 0.22920800000000008,
+ 0.13356200000000007,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05,
+ 0.05
+ ],
+ "epsilon_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "grad_norm": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMzYxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDM2MTEycQFhLgEAAAAAAAAA3CHNPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNjYxNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDY2MTYwcQFhLgEAAAAAAAAAN3iFPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMjgyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDI4MjQwcQFhLgEAAAAAAAAAepLzPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzY2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDc2NjI0cQFhLgEAAAAAAAAAtToCPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzI2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDcyNjg4cQFhLgEAAAAAAAAAqc+ZPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMDkzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDA5MzI4cQFhLgEAAAAAAAAAU0tmPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNjE5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDYxOTM2cQFhLgEAAAAAAAAAEXM9Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzQzODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM0Mzg0cQFhLgEAAAAAAAAAif9OQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjExMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIxMTM2cQFhLgEAAAAAAAAAz8QdPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjM2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIzNjk2cQFhLgEAAAAAAAAAakYBPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjUzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzI1MzI4cQFhLgEAAAAAAAAAv2sAPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjIwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIyMDY0cQFhLgEAAAAAAAAAchToPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTg1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk4NTc2cQFhLgEAAAAAAAAA1ArUPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzExMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcxMTIwcQFhLgEAAAAAAAAAebFzPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODc5MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg3OTIwcQFhLgEAAAAAAAAAB1AvPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDIxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODAyMTI4cQFhLgEAAAAAAAAAczq+Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzA1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcwNTQ0cQFhLgEAAAAAAAAAlCAvQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTI5NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzUyOTc2cQFhLgEAAAAAAAAAZsqvPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzI3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcyNzUycQFhLgEAAAAAAAAA4xgPQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4Mjc1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI3NTY4cQFhLgEAAAAAAAAAK4nGPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODIzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzgyMzUycQFhLgEAAAAAAAAAgnNoPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzE4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcxODg4cQFhLgEAAAAAAAAAa62XPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjE5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIxOTA0cQFhLgEAAAAAAAAAf/VOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODY5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg2OTYwcQFhLgEAAAAAAAAAAt4BPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTAzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkwMzIwcQFhLgEAAAAAAAAAHpNEPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4Mzg3MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM4NzA0cQFhLgEAAAAAAAAAg0F2Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTk2MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk5NjMycQFhLgEAAAAAAAAA4K3BPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzIxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMyMTQ0cQFhLgEAAAAAAAAA8vnnPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTUzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE1MzQ0cQFhLgEAAAAAAAAAkysDQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDAwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQwMDE2cQFhLgEAAAAAAAAAhAa2Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTc2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE3NjQ4cQFhLgEAAAAAAAAA7TYqQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTkxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE5MTg0cQFhLgEAAAAAAAAA3S7GPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzAzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMwMzIwcQFhLgEAAAAAAAAAdcA8QA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTE1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODExNTM2cQFhLgEAAAAAAAAAlcXnPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTgyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE4MjU2cQFhLgEAAAAAAAAABFJFPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjM4MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYzODI0cQFhLgEAAAAAAAAAOo2oPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjczNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY3Mzc2cQFhLgEAAAAAAAAA/ersPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjYyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI2MjI0cQFhLgEAAAAAAAAAwaFhPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDg4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ4ODQ4cQFhLgEAAAAAAAAAjkoCPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDIyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQyMjI0cQFhLgEAAAAAAAAAgbsRPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzQwNjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM0MDY0cQFhLgEAAAAAAAAArUtlQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjY4MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI2ODAwcQFhLgEAAAAAAAAAcK0cQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjU2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY1NjQ4cQFhLgEAAAAAAAAABzgNPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDA3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQwNzg0cQFhLgEAAAAAAAAA8KsBQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTM1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkzNTg0cQFhLgEAAAAAAAAAHxZEQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODg1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg4NTkycQFhLgEAAAAAAAAArG5GPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTI2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzUyNjg4cQFhLgEAAAAAAAAAT1sBPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTQ2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE0NjA4cQFhLgEAAAAAAAAAwueRPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTg0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE4NDE2cQFhLgEAAAAAAAAAlGztPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Njc1NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY3NTY4cQFhLgEAAAAAAAAAJQiJPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTQxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE0MTI4cQFhLgEAAAAAAAAAAu0nQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjMyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYzMjQ4cQFhLgEAAAAAAAAAZtwTPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDYzNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA2MzUycQFhLgEAAAAAAAAAmYYvQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzY0OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc2NDk2cQFhLgEAAAAAAAAAocgpQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Nzk0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc5NDcycQFhLgEAAAAAAAAAluQqQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTA0MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkwNDE2cQFhLgEAAAAAAAAAGfulPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzgxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM4MTI4cQFhLgEAAAAAAAAAWH2WPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Mzk3MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM5NzI4cQFhLgEAAAAAAAAAea/WPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTc5MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE3OTM2cQFhLgEAAAAAAAAA21b1Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTM0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkzNDg4cQFhLgEAAAAAAAAAEenLPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODQ5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg0OTQ0cQFhLgEAAAAAAAAAWAIfQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTM0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODEzNDU2cQFhLgEAAAAAAAAAHs5ZPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjEzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYxMzI4cQFhLgEAAAAAAAAAzYgXPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDMyODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQzMjgwcQFhLgEAAAAAAAAAYi6zPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDYyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA2MjU2cQFhLgEAAAAAAAAAqFDwPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjI3NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIyNzY4cQFhLgEAAAAAAAAA3McNQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzQ5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM0OTYwcQFhLgEAAAAAAAAAYGHZPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzU0NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc1NDQwcQFhLgEAAAAAAAAA9ryOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzA3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcwNzM2cQFhLgEAAAAAAAAAGLGtPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDc5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ3OTg0cQFhLgEAAAAAAAAAt5ILQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjQyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI0MjA4cQFhLgEAAAAAAAAAgZEEQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzE0NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMxNDcycQFhLgEAAAAAAAAAnFkeQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Mzc4MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM3ODA4cQFhLgEAAAAAAAAA+naUPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTA1NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzUwNTc2cQFhLgEAAAAAAAAAnz4hQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzU1MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc1NTM2cQFhLgEAAAAAAAAAEjD8Pg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTUzMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk1MzEycQFhLgEAAAAAAAAAiGtTPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDIyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODAyMjI0cQFhLgEAAAAAAAAA41s9Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjQwMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY0MDE2cQFhLgEAAAAAAAAADzbCPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTkyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzU5MjE2cQFhLgEAAAAAAAAAXczxPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDU0ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA1NDg4cQFhLgEAAAAAAAAACxd2Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzgwMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM4MDMycQFhLgEAAAAAAAAAx0SmPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDY3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA2NzM2cQFhLgEAAAAAAAAAGqbvPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjIxNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIyMTYwcQFhLgEAAAAAAAAApBs8Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDUwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ1MDA4cQFhLgEAAAAAAAAAw5gCPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzMzMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODMzMzI4cQFhLgEAAAAAAAAApUYfPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTYyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE2MjA4cQFhLgEAAAAAAAAA3r/hPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Njk2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY5NjgwcQFhLgEAAAAAAAAAz2CBQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTg0ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk4NDgwcQFhLgEAAAAAAAAAhnl7QA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTQ5MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk0OTI4cQFhLgEAAAAAAAAAQHKNPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDU5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ1OTY4cQFhLgEAAAAAAAAA6q1fQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjkyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY5MjAwcQFhLgEAAAAAAAAAjl+jPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjAyNzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIwMjcycQFhLgEAAAAAAAAAD5uFPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODUyMzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg1MjMycQFhLgEAAAAAAAAAXny1Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzA2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMwNjA4cQFhLgEAAAAAAAAAZlRlQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTQxMjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzU0MTI4cQFhLgEAAAAAAAAAIrPwPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjY5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI2OTkycQFhLgEAAAAAAAAAttC8Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTc4NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE3ODcycQFhLgEAAAAAAAAAFUGAPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjgxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI4MTQ0cQFhLgEAAAAAAAAA1S94Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzgzMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc4MzIwcQFhLgEAAAAAAAAAhtORPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4Mjc2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI3NjY0cQFhLgEAAAAAAAAAaIvSPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDQ2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA0NjI0cQFhLgEAAAAAAAAADCpTPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzE2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODMxNjk2cQFhLgEAAAAAAAAAZamIPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODgxMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg4MTEycQFhLgEAAAAAAAAATWbdPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTM4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzUzODQwcQFhLgEAAAAAAAAAy1ocPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTQxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE0MTkycQFhLgEAAAAAAAAAGuimPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDQ0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA0NDMycQFhLgEAAAAAAAAAeIxpQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODg4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg4ODgwcQFhLgEAAAAAAAAAozlnPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDY4MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ2ODMycQFhLgEAAAAAAAAAJoilPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzgyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM4Mjg4cQFhLgEAAAAAAAAA1ipOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTc2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE3NjgwcQFhLgEAAAAAAAAAkyGMPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Njc3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY3NzYwcQFhLgEAAAAAAAAArg0TPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjQ2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY0Njg4cQFhLgEAAAAAAAAA/dgqPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4Mzc4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM3ODQwcQFhLgEAAAAAAAAAEGY9Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzE3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcxNzkycQFhLgEAAAAAAAAAdTcqPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjEyOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIxMjk2cQFhLgEAAAAAAAAADVEOQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTkzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk5MzQ0cQFhLgEAAAAAAAAA2DH4Pg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDI4OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQyODk2cQFhLgEAAAAAAAAAWbGOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTgyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzU4MjU2cQFhLgEAAAAAAAAA5JCZPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDkwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ5MDQwcQFhLgEAAAAAAAAAJqZwPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDAzMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQwMzA0cQFhLgEAAAAAAAAAisxoPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzI4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzcyODQ4cQFhLgEAAAAAAAAAWvx/QA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDA1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQwNTkycQFhLgEAAAAAAAAAzEZmPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDUyMDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ1MjAwcQFhLgEAAAAAAAAAYmcpQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDg5NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA4OTQ0cQFhLgEAAAAAAAAA29eqPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDg3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ4NzUycQFhLgEAAAAAAAAA+ZObPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzYzNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM2MzY4cQFhLgEAAAAAAAAAuFE8Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTE4MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODExODI0cQFhLgEAAAAAAAAA36EVPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTMwNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzEzMDQwcQFhLgEAAAAAAAAAHLrDPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTk4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE5ODg4cQFhLgEAAAAAAAAA0OAgQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjA0NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIwNDY0cQFhLgEAAAAAAAAAT+4pQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODI0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzgyNDQ4cQFhLgEAAAAAAAAAOSWZPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzA2NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODMwNjQwcQFhLgEAAAAAAAAAsFv5Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTUxODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE1MTg0cQFhLgEAAAAAAAAAFd+lPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjkxNjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzI5MTY4cQFhLgEAAAAAAAAAIULnPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjIyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIyMjU2cQFhLgEAAAAAAAAAx3i9Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTUyMTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzk1MjE2cQFhLgEAAAAAAAAAyOVhPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTgxNjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE4MTYwcQFhLgEAAAAAAAAAR/AtQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDc2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ3Njk2cQFhLgEAAAAAAAAAsdWtPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODYyODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg2Mjg4cQFhLgEAAAAAAAAAz/mCPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTEyNDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODExMjQ4cQFhLgEAAAAAAAAAaLu8Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzM1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMzNTg0cQFhLgEAAAAAAAAAS3YnPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Nzg2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc4NjA4cQFhLgEAAAAAAAAAIzBnPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTc5NjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE3OTY4cQFhLgEAAAAAAAAAN34RPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTI0MzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkyNDMycQFhLgEAAAAAAAAAzTMUQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDA0MDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODAwNDAwcQFhLgEAAAAAAAAA0CJAQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDczMTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ3MzEycQFhLgEAAAAAAAAA9JhpPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzY1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzM2NTYwcQFhLgEAAAAAAAAAYYM0Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjY4NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzI2ODY0cQFhLgEAAAAAAAAADWL3Pg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjMzNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYzMzQ0cQFhLgEAAAAAAAAAb1gGPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDY0NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA2NDQ4cQFhLgEAAAAAAAAA11plPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjkzOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI5MzkycQFhLgEAAAAAAAAAzPtEPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNTc5MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDU3OTA0cQFhLgEAAAAAAAAArb59Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMTk4ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDE5ODg4cQFhLgEAAAAAAAAAqOA3Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTY2ODhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzE2Njg4cQFhLgEAAAAAAAAAlarWPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTMxMzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzEzMTM2cQFhLgEAAAAAAAAA0mEXPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDI5OTM1ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQyOTkzNTg0cQFhLgEAAAAAAAAAXGHIQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzI1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODMyNTYwcQFhLgEAAAAAAAAAlY1wQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMzQxOTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDM0MTkycQFhLgEAAAAAAAAAOC8TQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Nzk3NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc5NzYwcQFhLgEAAAAAAAAAXfWPPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MzA1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzMwNTEycQFhLgEAAAAAAAAA9nUOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMzI4NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDMyODQ4cQFhLgEAAAAAAAAAejehQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MzgyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODM4MjI0cQFhLgEAAAAAAAAAxUalPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDI5NTAwOTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQyOTUwMDk2cQFhLgEAAAAAAAAAJQzKPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODA4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzgwODE2cQFhLgEAAAAAAAAAhjFXPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjI1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIyNTQ0cQFhLgEAAAAAAAAAAZGQPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODgyMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg4MjA4cQFhLgEAAAAAAAAAlm9/Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzI4ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDcyODgwcQFhLgEAAAAAAAAAjiTSPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjI3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIyNzM2cQFhLgEAAAAAAAAAE9NjQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzYxNDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDc2MTQ0cQFhLgEAAAAAAAAArhTQPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMDUxMDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDA1MTA0cQFhLgEAAAAAAAAA3I+QPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDc0MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA3NDA4cQFhLgEAAAAAAAAA0AMkPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNjk0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDY5NDI0cQFhLgEAAAAAAAAAgnZwPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjMxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYzMTUycQFhLgEAAAAAAAAARWAkPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMTQyMjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDE0MjI0cQFhLgEAAAAAAAAAPc6APw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3Njg1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY4NTI4cQFhLgEAAAAAAAAAMwysPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzQ5OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDc0OTkycQFhLgEAAAAAAAAAjtWkPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDI5NDg1NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQyOTQ4NTYwcQFhLgEAAAAAAAAA5iwbPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTMwMDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkzMDA4cQFhLgEAAAAAAAAANMggPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODI3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzgyNzM2cQFhLgEAAAAAAAAAONQGQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzY1OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzc2NTkycQFhLgEAAAAAAAAA0KuUPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjMwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzYzMDU2cQFhLgEAAAAAAAAAMP2BQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NjY2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzY2NjA4cQFhLgEAAAAAAAAAnWeBPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjAwODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIwMDgwcQFhLgEAAAAAAAAAwj8RQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMTk2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDE5Njk2cQFhLgEAAAAAAAAA6y3rPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDI5OTU2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQyOTk1Njk2cQFhLgEAAAAAAAAAkS5QPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3ODcwNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3Nzg3MDU2cQFhLgEAAAAAAAAApD5WPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjA2MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIwNjI0cQFhLgEAAAAAAAAAPnQpPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjE1MjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODIxNTIwcQFhLgEAAAAAAAAAB3/WPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MDc5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODA3OTg0cQFhLgEAAAAAAAAAGomdPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNjQ4MTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDY0ODE2cQFhLgEAAAAAAAAAhPUxQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDc1MDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ3NTA0cQFhLgEAAAAAAAAAuZKQPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjU4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI1ODQwcQFhLgEAAAAAAAAArBE7Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjY2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI2NjA4cQFhLgEAAAAAAAAArJhEPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMjMxNTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDIzMTUycQFhLgEAAAAAAAAAoImNPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMDU3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDA1Nzc2cQFhLgEAAAAAAAAAw8/APw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NDY1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzQ2NTQ0cQFhLgEAAAAAAAAABm0EPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MTc3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODE3Nzc2cQFhLgEAAAAAAAAAhC4OPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3OTI1MjhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzkyNTI4cQFhLgEAAAAAAAAAwtiBPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MTI3NTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzEyNzUycQFhLgEAAAAAAAAALw8JPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3MjE3NzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzIxNzc2cQFhLgEAAAAAAAAAONjjPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NzM3MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzczNzEycQFhLgEAAAAAAAAAt8OgPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwNzM4NDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDczODQwcQFhLgEAAAAAAAAA/DiWPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDI5NDk0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQyOTQ5NDI0cQFhLgEAAAAAAAAApKbOPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzY5NDMwMjU0NTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDc2OTQzMDI1NDU2cQFhLgEAAAAAAAAAApgNPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc3NTk3OTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3NzU5NzkycQFhLgEAAAAAAAAATIFMPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADMwNzA0Njc4MjQ3ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAzMDcwNDY3ODI0Nzg0cQFhLgEAAAAAAAAAAR9IQA=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_T": [
+ 6986,
+ 17208,
+ 27326,
+ 37446,
+ 47446,
+ 57560,
+ 67646,
+ 77674,
+ 87676,
+ 97697,
+ 107788,
+ 117898,
+ 127977,
+ 138187,
+ 148314,
+ 158336,
+ 168474,
+ 178659,
+ 188844,
+ 198937,
+ 209085,
+ 219203,
+ 229205,
+ 239302,
+ 249540,
+ 259654,
+ 269883,
+ 279962,
+ 290103,
+ 300233,
+ 310348,
+ 320404,
+ 330621,
+ 340774,
+ 350782,
+ 360905,
+ 371032,
+ 381218,
+ 391260,
+ 401437,
+ 411519,
+ 421583,
+ 431831,
+ 442080,
+ 452171,
+ 462389,
+ 472545,
+ 482562,
+ 492759,
+ 502876,
+ 513077,
+ 523291,
+ 533582,
+ 543750,
+ 553770,
+ 563908,
+ 573965,
+ 584119,
+ 594318,
+ 604610,
+ 614781,
+ 624841,
+ 634985,
+ 645190,
+ 655224,
+ 665431,
+ 675582,
+ 685795,
+ 695958,
+ 706251,
+ 716474,
+ 726601,
+ 736650,
+ 746776,
+ 757036,
+ 767142,
+ 777302,
+ 787370,
+ 797412,
+ 807474,
+ 817490,
+ 827784,
+ 837975,
+ 848136,
+ 858264,
+ 868484,
+ 878714,
+ 888844,
+ 899094,
+ 909295,
+ 919346,
+ 929608,
+ 939629,
+ 949834,
+ 959847,
+ 969980,
+ 980122,
+ 990167,
+ 1000421,
+ 1010682,
+ 1020899,
+ 1031029,
+ 1041227,
+ 1051318,
+ 1061418,
+ 1071617,
+ 1081746,
+ 1091976,
+ 1102076,
+ 1112333,
+ 1122555,
+ 1132784,
+ 1142944,
+ 1152981,
+ 1163172,
+ 1173184,
+ 1183364,
+ 1193423,
+ 1203440,
+ 1213549,
+ 1223612,
+ 1233681,
+ 1243748,
+ 1253938,
+ 1263975,
+ 1274067,
+ 1284125,
+ 1294253,
+ 1304340,
+ 1314506,
+ 1324563,
+ 1334581,
+ 1344810,
+ 1354992,
+ 1365010,
+ 1375043,
+ 1385083,
+ 1395237,
+ 1405432,
+ 1415708,
+ 1425821,
+ 1435936,
+ 1445973,
+ 1456099,
+ 1466151,
+ 1476185,
+ 1486372,
+ 1496551,
+ 1506712,
+ 1516778,
+ 1527042,
+ 1537198,
+ 1547453,
+ 1557613,
+ 1567733,
+ 1577958,
+ 1588198,
+ 1598438,
+ 1608533,
+ 1618646,
+ 1628879,
+ 1638950,
+ 1649053,
+ 1659258,
+ 1669530,
+ 1679723,
+ 1689942,
+ 1700096,
+ 1710325,
+ 1720334,
+ 1730442,
+ 1740566,
+ 1750671,
+ 1760753,
+ 1770969,
+ 1781219,
+ 1791341,
+ 1801579,
+ 1811774,
+ 1821795,
+ 1831999,
+ 1842201,
+ 1852343,
+ 1862458,
+ 1872561,
+ 1882789,
+ 1893002,
+ 1903009,
+ 1913162,
+ 1923225,
+ 1933463,
+ 1943702,
+ 1953874,
+ 1964002,
+ 1974036,
+ 1984114,
+ 1994349,
+ 2004522,
+ 2014669,
+ 2024770,
+ 2034948,
+ 2045125,
+ 2055287,
+ 2065451,
+ 2075690,
+ 2085862
+ ],
+ "loss_td": [
+ 0.12952986359596252,
+ 0.030188875272870064,
+ 0.03579281270503998,
+ 0.03706536442041397,
+ 0.057708751410245895,
+ 0.04529724270105362,
+ 0.05723081901669502,
+ 0.10106198489665985,
+ 0.04929080232977867,
+ 0.04307936504483223,
+ 0.05558697134256363,
+ 0.052993983030319214,
+ 0.060012973845005035,
+ 0.0636521503329277,
+ 0.06540510058403015,
+ 0.06983207911252975,
+ 0.07843239605426788,
+ 0.06843740493059158,
+ 0.08329121023416519,
+ 0.06600295007228851,
+ 0.07889227569103241,
+ 0.07440505921840668,
+ 0.08197596669197083,
+ 0.08196178078651428,
+ 0.09418430924415588,
+ 0.08820083737373352,
+ 0.10003215819597244,
+ 0.09546726942062378,
+ 0.09992783516645432,
+ 0.094876728951931,
+ 0.10917875170707703,
+ 0.09720480442047119,
+ 0.11634595692157745,
+ 0.09551189839839935,
+ 0.09455158561468124,
+ 0.10009671002626419,
+ 0.1003352478146553,
+ 0.09168204665184021,
+ 0.09287209808826447,
+ 0.08861605823040009,
+ 0.11207234859466553,
+ 0.09424770623445511,
+ 0.0968918725848198,
+ 0.10213218629360199,
+ 0.10938456654548645,
+ 0.09888958930969238,
+ 0.08441272377967834,
+ 0.08881093561649323,
+ 0.09725494682788849,
+ 0.09214361757040024,
+ 0.10304427146911621,
+ 0.0877867117524147,
+ 0.10175342857837677,
+ 0.10062199085950851,
+ 0.10363547503948212,
+ 0.09426748007535934,
+ 0.09842141717672348,
+ 0.09297870844602585,
+ 0.10858514904975891,
+ 0.103915274143219,
+ 0.1019936054944992,
+ 0.1007244661450386,
+ 0.09983036667108536,
+ 0.09767334163188934,
+ 0.09562354534864426,
+ 0.09870617091655731,
+ 0.10673592984676361,
+ 0.10371345281600952,
+ 0.09291326254606247,
+ 0.10490182042121887,
+ 0.10468296706676483,
+ 0.10017982125282288,
+ 0.09856314212083817,
+ 0.09623045474290848,
+ 0.10055223107337952,
+ 0.10053295642137527,
+ 0.10345025360584259,
+ 0.09560725837945938,
+ 0.09875987470149994,
+ 0.10360373556613922,
+ 0.09096477925777435,
+ 0.09330710768699646,
+ 0.10539326816797256,
+ 0.08943403512239456,
+ 0.0909726619720459,
+ 0.09611799567937851,
+ 0.10780072212219238,
+ 0.11058028042316437,
+ 0.09297849237918854,
+ 0.10436087101697922,
+ 0.09956949949264526,
+ 0.10417399555444717,
+ 0.09528127312660217,
+ 0.1106286272406578,
+ 0.10242107510566711,
+ 0.1042756512761116,
+ 0.10197504609823227,
+ 0.10496865957975388,
+ 0.10466541349887848,
+ 0.09493318200111389,
+ 0.09206227958202362,
+ 0.09002862125635147,
+ 0.09812025725841522,
+ 0.09116349369287491,
+ 0.10210861265659332,
+ 0.10073334723711014,
+ 0.10630447417497635,
+ 0.09918243438005447,
+ 0.09411199390888214,
+ 0.09550060331821442,
+ 0.09899230301380157,
+ 0.10320276021957397,
+ 0.10207421332597733,
+ 0.09522479772567749,
+ 0.10811486095190048,
+ 0.11149308830499649,
+ 0.09429185837507248,
+ 0.09631936997175217,
+ 0.10086491703987122,
+ 0.09439888596534729,
+ 0.11340086907148361,
+ 0.09217720478773117,
+ 0.1116589903831482,
+ 0.0942549780011177,
+ 0.09636066108942032,
+ 0.1021905317902565,
+ 0.09894206374883652,
+ 0.10254630446434021,
+ 0.09414931386709213,
+ 0.09663713723421097,
+ 0.0937880128622055,
+ 0.10463512688875198,
+ 0.09302869439125061,
+ 0.0878915935754776,
+ 0.09396804869174957,
+ 0.09473445266485214,
+ 0.10010261088609695,
+ 0.08698989450931549,
+ 0.09070391207933426,
+ 0.09212058037519455,
+ 0.08187764883041382,
+ 0.08952119946479797,
+ 0.08928222954273224,
+ 0.0927542969584465,
+ 0.09452532976865768,
+ 0.08582685142755508,
+ 0.09726907312870026,
+ 0.09218023717403412,
+ 0.093818798661232,
+ 0.09312295913696289,
+ 0.1009458377957344,
+ 0.10164926946163177,
+ 0.0887281745672226,
+ 0.0967981144785881,
+ 0.09302960336208344,
+ 0.13151341676712036,
+ 0.13371936976909637,
+ 0.12029292434453964,
+ 0.0990375205874443,
+ 0.0957237109541893,
+ 0.1412898749113083,
+ 0.09589672833681107,
+ 0.0970832034945488,
+ 0.1026608794927597,
+ 0.09581393003463745,
+ 0.10263115912675858,
+ 0.09127192199230194,
+ 0.10929912328720093,
+ 0.09422168135643005,
+ 0.09837611764669418,
+ 0.09490285068750381,
+ 0.09902021288871765,
+ 0.09099949896335602,
+ 0.09102153778076172,
+ 0.10069850832223892,
+ 0.09744609892368317,
+ 0.09116481989622116,
+ 0.09100459516048431,
+ 0.09865892678499222,
+ 0.0860518291592598,
+ 0.10970527678728104,
+ 0.10269363224506378,
+ 0.09025291353464127,
+ 0.0917256772518158,
+ 0.08693130314350128,
+ 0.09693792462348938,
+ 0.08520610630512238,
+ 0.08427298814058304,
+ 0.09101413935422897,
+ 0.09105934202671051,
+ 0.08361319452524185,
+ 0.07744954526424408,
+ 0.07569766789674759,
+ 0.09109757095575333,
+ 0.07748094946146011,
+ 0.07142316550016403,
+ 0.07914120703935623,
+ 0.08527810871601105,
+ 0.0767289400100708,
+ 0.0935053750872612,
+ 0.0789598748087883,
+ 0.077302485704422,
+ 0.0776020810008049,
+ 0.09376173466444016,
+ 0.08479758352041245,
+ 0.10398479551076889
+ ],
+ "loss_td_T": [
+ 6986,
+ 17208,
+ 27326,
+ 37446,
+ 47446,
+ 57560,
+ 67646,
+ 77674,
+ 87676,
+ 97697,
+ 107788,
+ 117898,
+ 127977,
+ 138187,
+ 148314,
+ 158336,
+ 168474,
+ 178659,
+ 188844,
+ 198937,
+ 209085,
+ 219203,
+ 229205,
+ 239302,
+ 249540,
+ 259654,
+ 269883,
+ 279962,
+ 290103,
+ 300233,
+ 310348,
+ 320404,
+ 330621,
+ 340774,
+ 350782,
+ 360905,
+ 371032,
+ 381218,
+ 391260,
+ 401437,
+ 411519,
+ 421583,
+ 431831,
+ 442080,
+ 452171,
+ 462389,
+ 472545,
+ 482562,
+ 492759,
+ 502876,
+ 513077,
+ 523291,
+ 533582,
+ 543750,
+ 553770,
+ 563908,
+ 573965,
+ 584119,
+ 594318,
+ 604610,
+ 614781,
+ 624841,
+ 634985,
+ 645190,
+ 655224,
+ 665431,
+ 675582,
+ 685795,
+ 695958,
+ 706251,
+ 716474,
+ 726601,
+ 736650,
+ 746776,
+ 757036,
+ 767142,
+ 777302,
+ 787370,
+ 797412,
+ 807474,
+ 817490,
+ 827784,
+ 837975,
+ 848136,
+ 858264,
+ 868484,
+ 878714,
+ 888844,
+ 899094,
+ 909295,
+ 919346,
+ 929608,
+ 939629,
+ 949834,
+ 959847,
+ 969980,
+ 980122,
+ 990167,
+ 1000421,
+ 1010682,
+ 1020899,
+ 1031029,
+ 1041227,
+ 1051318,
+ 1061418,
+ 1071617,
+ 1081746,
+ 1091976,
+ 1102076,
+ 1112333,
+ 1122555,
+ 1132784,
+ 1142944,
+ 1152981,
+ 1163172,
+ 1173184,
+ 1183364,
+ 1193423,
+ 1203440,
+ 1213549,
+ 1223612,
+ 1233681,
+ 1243748,
+ 1253938,
+ 1263975,
+ 1274067,
+ 1284125,
+ 1294253,
+ 1304340,
+ 1314506,
+ 1324563,
+ 1334581,
+ 1344810,
+ 1354992,
+ 1365010,
+ 1375043,
+ 1385083,
+ 1395237,
+ 1405432,
+ 1415708,
+ 1425821,
+ 1435936,
+ 1445973,
+ 1456099,
+ 1466151,
+ 1476185,
+ 1486372,
+ 1496551,
+ 1506712,
+ 1516778,
+ 1527042,
+ 1537198,
+ 1547453,
+ 1557613,
+ 1567733,
+ 1577958,
+ 1588198,
+ 1598438,
+ 1608533,
+ 1618646,
+ 1628879,
+ 1638950,
+ 1649053,
+ 1659258,
+ 1669530,
+ 1679723,
+ 1689942,
+ 1700096,
+ 1710325,
+ 1720334,
+ 1730442,
+ 1740566,
+ 1750671,
+ 1760753,
+ 1770969,
+ 1781219,
+ 1791341,
+ 1801579,
+ 1811774,
+ 1821795,
+ 1831999,
+ 1842201,
+ 1852343,
+ 1862458,
+ 1872561,
+ 1882789,
+ 1893002,
+ 1903009,
+ 1913162,
+ 1923225,
+ 1933463,
+ 1943702,
+ 1953874,
+ 1964002,
+ 1974036,
+ 1984114,
+ 1994349,
+ 2004522,
+ 2014669,
+ 2024770,
+ 2034948,
+ 2045125,
+ 2055287,
+ 2065451,
+ 2075690,
+ 2085862
+ ],
+ "q_taken_mean": [
+ 0.12509066682472086,
+ 0.20525324044358864,
+ 0.3073972004374733,
+ 0.36012185499644384,
+ 0.3975369276055642,
+ 0.5018438400639674,
+ 0.535439801846899,
+ 0.669870271262326,
+ 0.6286381493847014,
+ 0.6700880035143428,
+ 0.7714030569403196,
+ 0.7790175773309391,
+ 0.7860794637418788,
+ 0.8770161811879139,
+ 0.8804806055878599,
+ 0.9124463826605569,
+ 0.936030097301329,
+ 0.9637999911909796,
+ 0.9649146209084837,
+ 0.9553325407608696,
+ 0.9973024186494052,
+ 0.978341971826111,
+ 1.0212419445222751,
+ 1.0300511308111127,
+ 1.0348204664408867,
+ 1.064147733210255,
+ 1.0278850218402142,
+ 1.0986931858188989,
+ 1.1196866325468424,
+ 1.1000108751054556,
+ 1.0959372161869498,
+ 1.1466750860417259,
+ 1.1804252993030637,
+ 1.1571775714190362,
+ 1.151527948943662,
+ 1.1763587813682883,
+ 1.2212712023729946,
+ 1.2031529734157547,
+ 1.147156473459846,
+ 1.1868497367232032,
+ 1.2541672176672838,
+ 1.179210217213673,
+ 1.2294830463085036,
+ 1.2490913481089874,
+ 1.2653309697690218,
+ 1.2322367974477328,
+ 1.2176119503641758,
+ 1.226069233217205,
+ 1.218437037908321,
+ 1.2572880790788763,
+ 1.2068496496429053,
+ 1.2339146130311995,
+ 1.2518125484746638,
+ 1.193329098109872,
+ 1.3282577422493624,
+ 1.3058213401817196,
+ 1.3562361548092976,
+ 1.3403121511360652,
+ 1.3201275562026002,
+ 1.3933813586606567,
+ 1.3156952270330828,
+ 1.4113662113168457,
+ 1.2863937711368367,
+ 1.3416048310054025,
+ 1.3724223336594912,
+ 1.3539165439313317,
+ 1.422978533238856,
+ 1.4233973001380522,
+ 1.4032568878034901,
+ 1.3986616595021173,
+ 1.4087262027270389,
+ 1.4024480634930805,
+ 1.3746623663782134,
+ 1.3621752543176764,
+ 1.4432331837147554,
+ 1.4533488037600604,
+ 1.446225985685585,
+ 1.4166666666666667,
+ 1.432546381277042,
+ 1.484107981861888,
+ 1.3684191389174596,
+ 1.3562152688551972,
+ 1.4245927993992842,
+ 1.3414358186074113,
+ 1.397351505678193,
+ 1.44137752469985,
+ 1.350087707488596,
+ 1.3706508185189923,
+ 1.4063741931401088,
+ 1.4907683856792038,
+ 1.50060505153999,
+ 1.404034612007285,
+ 1.5131070193150513,
+ 1.4713382895977374,
+ 1.527659126984127,
+ 1.4902457341269841,
+ 1.4855177444794954,
+ 1.4792046949185167,
+ 1.4111199803743961,
+ 1.4356178667207184,
+ 1.4234088389820214,
+ 1.3851484164095642,
+ 1.376876412990346,
+ 1.407242643690447,
+ 1.4285132924660144,
+ 1.4836290489372534,
+ 1.4532180105611991,
+ 1.531633806354389,
+ 1.4982739781077772,
+ 1.5256162038438257,
+ 1.469142649962972,
+ 1.5417282795435023,
+ 1.4755044329848503,
+ 1.491678908610272,
+ 1.4866628644047035,
+ 1.480224670031056,
+ 1.5952033125612446,
+ 1.505549160033321,
+ 1.545457063167819,
+ 1.5746177198949756,
+ 1.5893623661412586,
+ 1.5741082197563623,
+ 1.543521025686553,
+ 1.5816906361456156,
+ 1.5486163290454436,
+ 1.5113827173519836,
+ 1.5495814766090765,
+ 1.5370865347172298,
+ 1.519261633321317,
+ 1.5918132360082555,
+ 1.4860642197682838,
+ 1.5303179807329645,
+ 1.5532576141375611,
+ 1.4991656683867798,
+ 1.4975087725307514,
+ 1.5474484848484849,
+ 1.4990336876789123,
+ 1.4993364234810969,
+ 1.4945617219917013,
+ 1.5595924785867237,
+ 1.5778502052594003,
+ 1.5096668757603406,
+ 1.5978986686390533,
+ 1.4709621916338935,
+ 1.563350835392399,
+ 1.5183953660154366,
+ 1.5136022498194728,
+ 1.5140225470389579,
+ 1.569676002217162,
+ 1.5034127697841726,
+ 1.4918122561867608,
+ 1.5427215309772067,
+ 1.462961777267753,
+ 1.4307235333881965,
+ 1.5884740647782958,
+ 1.5821028582686711,
+ 1.585903405666064,
+ 1.5848116664664185,
+ 1.5744863167004894,
+ 1.603167265110712,
+ 1.6041259079194001,
+ 1.6209921474550006,
+ 1.6555069180849762,
+ 1.618074771414822,
+ 1.5706318660926366,
+ 1.6326553098676293,
+ 1.5976954266510812,
+ 1.616747254423429,
+ 1.6105992817141466,
+ 1.5922055529356285,
+ 1.6375305970149254,
+ 1.5741579486949233,
+ 1.626927119577824,
+ 1.5722795349042191,
+ 1.5931387889509991,
+ 1.6709170506240523,
+ 1.5913536076326773,
+ 1.585197330237957,
+ 1.5429936637315564,
+ 1.6866834272813127,
+ 1.5615239923650568,
+ 1.5914027364166767,
+ 1.6539364913238686,
+ 1.6237050990605806,
+ 1.6331023422787194,
+ 1.6317021580520394,
+ 1.5552500356938892,
+ 1.5835783299846353,
+ 1.6179414502227956,
+ 1.6467412851585876,
+ 1.5819176413955627,
+ 1.5548781325963654,
+ 1.5518210378474275,
+ 1.551161408492823,
+ 1.6013726484294097,
+ 1.5872146528026772,
+ 1.5331078321636011,
+ 1.6119196997797611,
+ 1.5679208734444705,
+ 1.4805947220235967,
+ 1.530873355456793,
+ 1.580736480705688,
+ 1.5610918528232935,
+ 1.5549923549715392,
+ 1.5258288702805065,
+ 1.5595438601576994
+ ],
+ "q_taken_mean_T": [
+ 6986,
+ 17208,
+ 27326,
+ 37446,
+ 47446,
+ 57560,
+ 67646,
+ 77674,
+ 87676,
+ 97697,
+ 107788,
+ 117898,
+ 127977,
+ 138187,
+ 148314,
+ 158336,
+ 168474,
+ 178659,
+ 188844,
+ 198937,
+ 209085,
+ 219203,
+ 229205,
+ 239302,
+ 249540,
+ 259654,
+ 269883,
+ 279962,
+ 290103,
+ 300233,
+ 310348,
+ 320404,
+ 330621,
+ 340774,
+ 350782,
+ 360905,
+ 371032,
+ 381218,
+ 391260,
+ 401437,
+ 411519,
+ 421583,
+ 431831,
+ 442080,
+ 452171,
+ 462389,
+ 472545,
+ 482562,
+ 492759,
+ 502876,
+ 513077,
+ 523291,
+ 533582,
+ 543750,
+ 553770,
+ 563908,
+ 573965,
+ 584119,
+ 594318,
+ 604610,
+ 614781,
+ 624841,
+ 634985,
+ 645190,
+ 655224,
+ 665431,
+ 675582,
+ 685795,
+ 695958,
+ 706251,
+ 716474,
+ 726601,
+ 736650,
+ 746776,
+ 757036,
+ 767142,
+ 777302,
+ 787370,
+ 797412,
+ 807474,
+ 817490,
+ 827784,
+ 837975,
+ 848136,
+ 858264,
+ 868484,
+ 878714,
+ 888844,
+ 899094,
+ 909295,
+ 919346,
+ 929608,
+ 939629,
+ 949834,
+ 959847,
+ 969980,
+ 980122,
+ 990167,
+ 1000421,
+ 1010682,
+ 1020899,
+ 1031029,
+ 1041227,
+ 1051318,
+ 1061418,
+ 1071617,
+ 1081746,
+ 1091976,
+ 1102076,
+ 1112333,
+ 1122555,
+ 1132784,
+ 1142944,
+ 1152981,
+ 1163172,
+ 1173184,
+ 1183364,
+ 1193423,
+ 1203440,
+ 1213549,
+ 1223612,
+ 1233681,
+ 1243748,
+ 1253938,
+ 1263975,
+ 1274067,
+ 1284125,
+ 1294253,
+ 1304340,
+ 1314506,
+ 1324563,
+ 1334581,
+ 1344810,
+ 1354992,
+ 1365010,
+ 1375043,
+ 1385083,
+ 1395237,
+ 1405432,
+ 1415708,
+ 1425821,
+ 1435936,
+ 1445973,
+ 1456099,
+ 1466151,
+ 1476185,
+ 1486372,
+ 1496551,
+ 1506712,
+ 1516778,
+ 1527042,
+ 1537198,
+ 1547453,
+ 1557613,
+ 1567733,
+ 1577958,
+ 1588198,
+ 1598438,
+ 1608533,
+ 1618646,
+ 1628879,
+ 1638950,
+ 1649053,
+ 1659258,
+ 1669530,
+ 1679723,
+ 1689942,
+ 1700096,
+ 1710325,
+ 1720334,
+ 1730442,
+ 1740566,
+ 1750671,
+ 1760753,
+ 1770969,
+ 1781219,
+ 1791341,
+ 1801579,
+ 1811774,
+ 1821795,
+ 1831999,
+ 1842201,
+ 1852343,
+ 1862458,
+ 1872561,
+ 1882789,
+ 1893002,
+ 1903009,
+ 1913162,
+ 1923225,
+ 1933463,
+ 1943702,
+ 1953874,
+ 1964002,
+ 1974036,
+ 1984114,
+ 1994349,
+ 2004522,
+ 2014669,
+ 2024770,
+ 2034948,
+ 2045125,
+ 2055287,
+ 2065451,
+ 2075690,
+ 2085862
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.809917355371901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.605371900826452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.55339805825243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.387254901960784
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.930693069306937
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.427184466019426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.67961165048544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35891089108912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.269801980198018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.68316831683166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274513
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.492574257425744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 25.650485436893216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.861386138613863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.03921568627451
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699026
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.111650485436904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.669902912621353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.14851485148514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.58823529411765
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.941747572815533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.14356435643564
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.435643564356425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.973039215686256
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.45098039215685
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811888
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611647
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.482673267326746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262143
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.37254901960782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.106796116504867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980394
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274506
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.683168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.764705882352917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.098039215686256
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.420792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901944
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.03921568627451
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.580882352941174
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.335784313725476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079206
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.634803921568604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.51456310679611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.06796116504856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.325242718446624
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.980198019801982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.456310679611654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.284653465346523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.90099009900989
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.17821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.372549019607835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.841584158415834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.68203883495147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.07843137254901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.18811881188118
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.90346534653466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.106796116504864
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.958737864077662
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.33663366336634
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.448019801980198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316832
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.377450980392133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.198019801980195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 35.14563106796118
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.320388349514577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.46116504854372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.2621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.725490196078425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.35643564356435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.07920792079207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.372549019607824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980365
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.56862745098039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980383
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.313725490196063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.21782178217822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.88235294117646
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.2621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.25742574257425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.11633663366335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.158415841584162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ }
+ ],
+ "return_max_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.38560595390311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.210515993574214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.913712418242353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.653778173833297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.246229692026177
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.885976017812537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.563820440350357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.35573737049407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.563885347981744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.24631244793283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.68674492838741
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.145760123361907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.52731178153707
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.50604434379323
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.189025233271936
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.500967212341314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.137819603114458
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.270367568716768
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.061148969537612
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.05068050520896
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.33115470624697
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.10187082997551
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.962926056398624
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.450832570767904
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.625576022646051
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.439670006201165
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.1969476243438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.82890997303268
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.827580326262035
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.325685592598653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.165038278199079
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.27445102541017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.460548712000032
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.713535258172545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.769930011874125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.731173970134353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.713211817023403
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.76424730375656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.413375210640835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.763758342145676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.130215424888537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.569908764889197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.280138459766953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.577437519293559
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.477502087695477
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.409812119247876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.997851742604196
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.820382107894996
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.644514259675615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.54657460723896
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.767249123465215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.081757499165835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.628371525392843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.17624558384835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.980181567458235
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.838842942232825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.860730362210962
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.126570024965183
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.661431517283306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.50253359468106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.393947475495242
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.839045687232357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.81435609636732
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.954833559082116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.11224312938636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.934298021563785
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.922102320827516
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.768085313451246
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.616782244881207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.014883836710762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.17105328283408
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.777715988773005
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.898419889220747
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.904479409320778
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.58492821060428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.882625944389908
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.288980677396246
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.847895397308527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.053105131777187
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.525869170077742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.133041026485866
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.31376237216954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.301130807767304
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.26729695240425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.347967527769306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.262453102547997
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.260081415939926
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.76781422586528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.867257184388972
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.98903030958151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.703705188073112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.000032428379065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.1047075329069
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.37233595459012
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.628154374930972
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.258005057850742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.56567545829135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.857608853963704
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.09109110269105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.997004664882116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.54798971558265
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.762470379088263
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.1243293431293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.76337801898814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.796870889568662
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.031777340838126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.995753454151686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.621032406500802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.043546064631744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.04621182081426
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.4604090618248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.31439052754034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.39541879361376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.896339486667692
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.142136173446495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.099187683646978
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.20022516470389
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.24185647037045
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.34654182502244
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.576058650310664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.198668268614252
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.784261950370034
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.685319140085152
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.993213811332538
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.143998587628094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.472873047334968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.316240894274923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.10702768417037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.25914383486156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.95465495618875
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.348491185432856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.881922319346614
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.502252437628396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.726511692918162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.869640255863317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.87348375080016
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.381971580995156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.626450401322256
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.66220617394038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.692488100138227
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.41511486255338
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.05131113438879
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.75425194399435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.14130777418249
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.11871684645214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.78670179563129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.593722442595038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.093243423427726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.12806515025435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.998858922948543
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.618342347031085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.57187377465227
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.25540337742312
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.212574933763293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.38042487174159
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.050105543129604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.994785922401945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.870085135819956
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.547545748468632
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.991577796334827
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.805422535324308
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.743202994746273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.846347385918737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.074743808458745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.898789556818333
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.492691096881433
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.12352214130271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.553155484001355
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.615174891242233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.905334374228822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.4581887910893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.044346601888112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.99413135406834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.545967175320943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.12548368184636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.095932255113528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.54962287146123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.975360969897807
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.347042989125168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.58557839868099
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.3108488631458
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.17304720515421
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.89499737379686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.055070742784597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.231837001620168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.487170470551206
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.586112523202736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.897316358124367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.192692917462548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.946147861640664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.416885765787203
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.867655940675842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.59542395663845
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.34667800183829
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.90088471796424
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.924976155813123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.4885053238064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.491417189697547
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.335812704888166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.948541154256606
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.764471764899533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.346377299890076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.132625031396696
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.691489863510622
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.216776590696632
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.214916441320742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.130184056823747
+ }
+ ],
+ "return_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.856435643564356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6862745098039216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.149509803921569
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.4901960784313726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7354368932038833
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.834710743801653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0049504950495054
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.276960784313726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.83910891089109
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.669421487603306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.066115702479339
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.0784313725490176
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.334951456310678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.460396039603962
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.231404958677686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.101941747572815
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.793388429752067
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.056930693069308
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.451456310679611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.198347107438017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.8429752066115705
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.004854368932037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.3140495867768593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.057851239669422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.682038834951455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.7004132231404965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.911157024793389
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834951456310678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.801652892561985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.162621359223299
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.935950413223141
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.475206611570248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.754132231404958
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.613636363636363
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.101239669421487
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.03883495145631
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.038834951456311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.07843137254902
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.897058823529412
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.113636363636363
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.849173553719008
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.2970297029703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.165048543689319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.745867768595041
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.859504132231405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.123966942148759
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.823529411764706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.909090909090908
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.708737864077667
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.865702479338844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.53719008264463
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.669421487603307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.983471074380166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.470297029702973
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.884297520661158
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.953431372549022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.818181818181819
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.776859504132233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019607843137256
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.301470588235295
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.884803921568626
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.975206611570249
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.970297029702973
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.326446280991736
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.841584158415842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5206611570247945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.792079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.137254901960782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.816115702479337
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.942148760330578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.946280991735537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.960784313725492
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.07920792079208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.227722772277231
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.101485148514853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.256198347107437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.8450413223140485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.991735537190085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.25619834710744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.335784313725489
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.429611650485437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.332524271844659
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.36893203883495
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.762254901960786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.404958677685952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.190082644628099
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.512396694214875
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.257425742574258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.258264462809918
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.826446280991738
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.563106796116504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.247933884297522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.677184466019416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.68595041322314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.514462809917355
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.512396694214876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.047029702970297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.215686274509801
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.980198019801982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.561983471074383
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.12396694214876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.429611650485436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.274752475247526
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.345588235294117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.8719008264462795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.936893203883494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.973039215686272
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.421487603305786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.683884297520661
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.094660194174756
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.820247933884298
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.553719008264464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.148760330578514
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.770661157024795
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.81862745098039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.210743801652894
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.584710743801653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.62809917355372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.652892561983471
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.690082644628096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.925619834710744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.861570247933885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.202479338842977
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.442148760330578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.31404958677686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.17821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.572815533980581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.068181818181818
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.830882352941175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.47107438016529
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.9538834951456305
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.58884297520661
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.466019417475726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.831683168316834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.09504132231405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.8801652892562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.195544554455446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.407766990291261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.355371900826448
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.099173553719007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.072815533980581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.980392156862745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.760330578512397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.594059405940596
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.56198347107438
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.03712871287129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.62809917355372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.760330578512397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.09917355371901
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.294117647058823
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.5475206611570265
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.180693069306933
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.689320388349512
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.112745098039216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.082644628099175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.814049586776859
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.48543689320388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.764705882352942
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.69421487603306
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.163366336633665
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.999999999999998
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.7706611570247945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.140495867768595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.395631067961163
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.03398058252427
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.8987603305785115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.305785123966943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7355371900826446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.0297029702970297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.888349514563106
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.679611650485436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.03305785123967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.471074380165291
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.024793388429751
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.2184466019417455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.19834710743802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.088842975206608
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.941176470588236
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.106796116504853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.45145631067961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.982673267326734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.473300970873786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.444174757281552
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.152912621359224
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.431372549019609
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.727272727272728
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.75206611570248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7272727272727275
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.148514851485151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.431372549019608
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.2809917355371905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.225206611570247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.412621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.679611650485436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.002475247524755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.51980198019802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.535123966942146
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.107438016528928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.588235294117649
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.659090909090912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.132231404958677
+ }
+ ],
+ "return_min_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.549514201218565
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.641413148961171
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.878045777299156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.015263033793251
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.2053916410707677
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.094636358875944
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.876292804452388
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.90550668837076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9081934248631285
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.60534340281247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.013153352603069
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.4019299160062415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.291255859888523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.572623902661725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.282403197823745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.682610385913361
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.215793330676501
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.204637505250126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.786044284451325
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.540720192191663
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.751683518560632
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.599917634201636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.534360166669445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.793343338892979
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.762633755511537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.095856290437889
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.994103695750834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.834376369884504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.887035454611838
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3846930312521275
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.995522160933646
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.966474547515319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.944724317968509
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.930265160238798
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.967823046101393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.251159031691851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.913358376699119
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.051414671930567
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.167935702270836
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.867817251803138
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.97908376696465
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.997024405634707
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.990146504811473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.113356262917007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.901225065686097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9425041842372615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0463965935593995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.957379594985523
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.773497832795247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.439442346929677
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.267766069666437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.206955069500706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.130754541363863
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.371079556715197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.19320131383037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0027073727767055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.909928196579179
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.22805810862737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.663053811565599
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.034927148500516
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.50591340913412
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.357802096697422
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.935147241786394
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.995189936441533
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.498726030288638
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.788208841716254
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.344176613945899
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.209128081247822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.330564572285974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.043296118901742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.025601461541455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.786844402177598
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.860536941885241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.03535323065991
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9806462786913075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.166408885836402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.16520423653783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.06888272603151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.879366693349379
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1454464682126035
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.137707981231534
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.25691176709222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5691314269817385
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.891135104592968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.977881945252358
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.064813772791109
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.889126047114304
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.781809899580816
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.18884024849777
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2682837095077675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.102145696090959
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.235344020661255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.921696644044391
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.932376014912852
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.211885162894264
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.005738537114017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.382435000760442
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.760563633342807
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.870796285204547
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.072528190288037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.799055051773895
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.369017049557993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.288716984084198
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.324595322572138
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8770912363033965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9748894617046595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.035058361600058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.061812235493948
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.344232036483762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.100984503801796
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.219313036849677
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1496588550043825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.788354358169945
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9652219041598284
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.020401526988684
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.050511564885094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.191936397464162
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.904142679779648
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.079737424317131
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.121104332279055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.953555618476075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.753590971265637
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.327070719744676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6661471563917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.312006337850283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.908888550533224
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.941932104164424
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.066608238270527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.927048253640377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.568996978369843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.799566461057938
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.291619555647358
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.846675643914054
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.051549415743195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.281731662101797
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.087584165056943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.636976555477551
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.825219165799892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.475453754847076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.983011569702819
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.965672641517927
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.941498378153676
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.937942853820796
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.907405077623572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.860148998577336
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.216343694195273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.19788604426213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.879224615962759
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.954408591357449
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.839188049983776
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.954165527866229
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0270138770998125
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0110071119069275
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8736864322211755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.886654798509488
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.261396985661107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.847665004929471
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.987894135045114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.500539327025688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.833507885016636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8221253779436175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.815857958857156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.201513984940121
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.857820990320232
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.863872071042871
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5965413125196095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.541757576258836
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7947094333118985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.921083624562544
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.668628503509668
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.963879737412225
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.751124307520843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.928998599805619
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.03164275466517
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.178665020763071
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.913718014897048
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8184966884366345
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0847310123336165
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.009228296596375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.447980523736028
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.416694345424376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5608115403264415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.997681181087579
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.805726251848097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6956913816066095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.464313274912289
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.152774635759148
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.514240809858553
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.815031749159815
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.351295126802022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.042542033457909
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.977526555300178
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.802963783585557
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.149029717198158
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.840254303359268
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.796393720591295
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.68532129451663
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.400186768386883
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.658379573584295
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.790432826872658
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2306849158721835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.674472540039271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.634140124006824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.924167885591022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.464526447778118
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.623614686364528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.557825219415144
+ }
+ ],
+ "return_std_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "target_mean": [
+ 0.1953035255779416,
+ 0.2147562758522322,
+ 0.2905746505548442,
+ 0.3457650526760313,
+ 0.42620847713070187,
+ 0.49729094176156335,
+ 0.5510767741614861,
+ 0.6130081070548143,
+ 0.6353662293735142,
+ 0.6694694814114457,
+ 0.7799887966694952,
+ 0.7833979875321859,
+ 0.8056947287831618,
+ 0.8643859189604028,
+ 0.8751646486313998,
+ 0.9275353732335245,
+ 0.9040369880661785,
+ 0.951092318534179,
+ 0.9861954116566466,
+ 0.9561376811594203,
+ 0.987309088523443,
+ 0.9900285978817666,
+ 1.0280667729055757,
+ 1.0230993909674717,
+ 1.0421500838122606,
+ 1.0722102723979379,
+ 1.0387423823446527,
+ 1.0960301586044219,
+ 1.1358798143650244,
+ 1.1077153929977503,
+ 1.1202796431815005,
+ 1.1333684448623333,
+ 1.1535808549544577,
+ 1.1545469146308904,
+ 1.1589513644366196,
+ 1.185438060773868,
+ 1.2368666443850267,
+ 1.2105153653222538,
+ 1.1502132537472498,
+ 1.1895470205408036,
+ 1.2239431119412854,
+ 1.1973473886694812,
+ 1.2331509558338827,
+ 1.267419005015589,
+ 1.2378714121942935,
+ 1.2307604194949768,
+ 1.2194913128877798,
+ 1.2358717590111215,
+ 1.2327990573329748,
+ 1.2674394752836304,
+ 1.230315152944347,
+ 1.2310227482853684,
+ 1.2783545477313858,
+ 1.212441358444384,
+ 1.3066395240571735,
+ 1.3152314084960672,
+ 1.3637708480480288,
+ 1.3406759956987129,
+ 1.3316391437567714,
+ 1.3812930618158403,
+ 1.2965257183339924,
+ 1.4191074954680825,
+ 1.2877003934703435,
+ 1.3492891644156015,
+ 1.3706936969993477,
+ 1.3352672067021412,
+ 1.4343046452267456,
+ 1.4294966977284136,
+ 1.39293041856348,
+ 1.3852498837097396,
+ 1.3898887718677644,
+ 1.382255393868529,
+ 1.3867908302048868,
+ 1.3831476718893105,
+ 1.4406238146415475,
+ 1.4480282083123743,
+ 1.4429994545768459,
+ 1.4271556712962963,
+ 1.416007276779861,
+ 1.4773407256805695,
+ 1.3774280221457957,
+ 1.342929401419761,
+ 1.4237124952070552,
+ 1.3380612884082816,
+ 1.3939228579175704,
+ 1.4396297758254126,
+ 1.3826225331031423,
+ 1.4061947459713517,
+ 1.4129601870016453,
+ 1.4593754039033218,
+ 1.4881296960382513,
+ 1.4108992861324314,
+ 1.524495014713248,
+ 1.4443315917661848,
+ 1.5424304563492064,
+ 1.4998857142857143,
+ 1.4776769518927444,
+ 1.4862693282432953,
+ 1.4026819142512077,
+ 1.4461758022015716,
+ 1.4269593886661807,
+ 1.381929312893688,
+ 1.3785709512098252,
+ 1.4099439973735646,
+ 1.4381169800533604,
+ 1.4524705278732342,
+ 1.459033348302775,
+ 1.5416272918498053,
+ 1.4926452464788733,
+ 1.5316167902542372,
+ 1.4660083929893852,
+ 1.5460389119830653,
+ 1.4755853261550018,
+ 1.4887062877643504,
+ 1.4711827458966193,
+ 1.4787430124223602,
+ 1.5849516551322882,
+ 1.5136384672343577,
+ 1.549368911062225,
+ 1.580953313342881,
+ 1.561083108982072,
+ 1.5683125904595345,
+ 1.5641285289417615,
+ 1.5896231436785144,
+ 1.5585629433157009,
+ 1.5074142914204751,
+ 1.5470157751855458,
+ 1.5270915708123651,
+ 1.5398035703556838,
+ 1.5746321855651328,
+ 1.4966448489922761,
+ 1.513841109818959,
+ 1.544762859101204,
+ 1.5097837188229513,
+ 1.5119567120326391,
+ 1.540763446969697,
+ 1.4793033046576813,
+ 1.4891392189576036,
+ 1.501770802459988,
+ 1.569008743754461,
+ 1.5843458100309376,
+ 1.517464644160584,
+ 1.5967274408284025,
+ 1.4899676736980099,
+ 1.5913464084009883,
+ 1.5255268632416787,
+ 1.5190965744975329,
+ 1.5122492574779882,
+ 1.5688556148130393,
+ 1.5103504383992805,
+ 1.4989818892478362,
+ 1.5502230654401068,
+ 1.4577459075571497,
+ 1.4456076960380908,
+ 1.5922524874506991,
+ 1.5302105434650823,
+ 1.561438272302592,
+ 1.6018213835155592,
+ 1.5814969559508176,
+ 1.6066614302812687,
+ 1.567337782626523,
+ 1.6132062820359996,
+ 1.6450282003795764,
+ 1.61345250090231,
+ 1.563795093527316,
+ 1.6292341681708784,
+ 1.6138394944476915,
+ 1.6430449206833435,
+ 1.6089816167518871,
+ 1.5985677390356048,
+ 1.6348798507462687,
+ 1.579186871408046,
+ 1.6262841423103689,
+ 1.582732804971207,
+ 1.6040551037702189,
+ 1.6641443339554416,
+ 1.5893010211687537,
+ 1.5878790626813697,
+ 1.5608482567824846,
+ 1.697723114154173,
+ 1.5306184710878314,
+ 1.5980536647623678,
+ 1.638117908930955,
+ 1.6411489070215586,
+ 1.638946452742467,
+ 1.6389170915377402,
+ 1.5591540548258138,
+ 1.5700731665878738,
+ 1.6086457844746718,
+ 1.622541143028127,
+ 1.591709894815496,
+ 1.5505932399583284,
+ 1.5554915730337078,
+ 1.543243682715311,
+ 1.5907670291105742,
+ 1.5871153041711485,
+ 1.5372755702444336,
+ 1.6067951634403617,
+ 1.5722462725992017,
+ 1.4945757545286618,
+ 1.5419443136331528,
+ 1.570373163500118,
+ 1.5749020099021573,
+ 1.5512928424367203,
+ 1.5310818957864836,
+ 1.5881841372912802
+ ],
+ "target_mean_T": [
+ 6986,
+ 17208,
+ 27326,
+ 37446,
+ 47446,
+ 57560,
+ 67646,
+ 77674,
+ 87676,
+ 97697,
+ 107788,
+ 117898,
+ 127977,
+ 138187,
+ 148314,
+ 158336,
+ 168474,
+ 178659,
+ 188844,
+ 198937,
+ 209085,
+ 219203,
+ 229205,
+ 239302,
+ 249540,
+ 259654,
+ 269883,
+ 279962,
+ 290103,
+ 300233,
+ 310348,
+ 320404,
+ 330621,
+ 340774,
+ 350782,
+ 360905,
+ 371032,
+ 381218,
+ 391260,
+ 401437,
+ 411519,
+ 421583,
+ 431831,
+ 442080,
+ 452171,
+ 462389,
+ 472545,
+ 482562,
+ 492759,
+ 502876,
+ 513077,
+ 523291,
+ 533582,
+ 543750,
+ 553770,
+ 563908,
+ 573965,
+ 584119,
+ 594318,
+ 604610,
+ 614781,
+ 624841,
+ 634985,
+ 645190,
+ 655224,
+ 665431,
+ 675582,
+ 685795,
+ 695958,
+ 706251,
+ 716474,
+ 726601,
+ 736650,
+ 746776,
+ 757036,
+ 767142,
+ 777302,
+ 787370,
+ 797412,
+ 807474,
+ 817490,
+ 827784,
+ 837975,
+ 848136,
+ 858264,
+ 868484,
+ 878714,
+ 888844,
+ 899094,
+ 909295,
+ 919346,
+ 929608,
+ 939629,
+ 949834,
+ 959847,
+ 969980,
+ 980122,
+ 990167,
+ 1000421,
+ 1010682,
+ 1020899,
+ 1031029,
+ 1041227,
+ 1051318,
+ 1061418,
+ 1071617,
+ 1081746,
+ 1091976,
+ 1102076,
+ 1112333,
+ 1122555,
+ 1132784,
+ 1142944,
+ 1152981,
+ 1163172,
+ 1173184,
+ 1183364,
+ 1193423,
+ 1203440,
+ 1213549,
+ 1223612,
+ 1233681,
+ 1243748,
+ 1253938,
+ 1263975,
+ 1274067,
+ 1284125,
+ 1294253,
+ 1304340,
+ 1314506,
+ 1324563,
+ 1334581,
+ 1344810,
+ 1354992,
+ 1365010,
+ 1375043,
+ 1385083,
+ 1395237,
+ 1405432,
+ 1415708,
+ 1425821,
+ 1435936,
+ 1445973,
+ 1456099,
+ 1466151,
+ 1476185,
+ 1486372,
+ 1496551,
+ 1506712,
+ 1516778,
+ 1527042,
+ 1537198,
+ 1547453,
+ 1557613,
+ 1567733,
+ 1577958,
+ 1588198,
+ 1598438,
+ 1608533,
+ 1618646,
+ 1628879,
+ 1638950,
+ 1649053,
+ 1659258,
+ 1669530,
+ 1679723,
+ 1689942,
+ 1700096,
+ 1710325,
+ 1720334,
+ 1730442,
+ 1740566,
+ 1750671,
+ 1760753,
+ 1770969,
+ 1781219,
+ 1791341,
+ 1801579,
+ 1811774,
+ 1821795,
+ 1831999,
+ 1842201,
+ 1852343,
+ 1862458,
+ 1872561,
+ 1882789,
+ 1893002,
+ 1903009,
+ 1913162,
+ 1923225,
+ 1933463,
+ 1943702,
+ 1953874,
+ 1964002,
+ 1974036,
+ 1984114,
+ 1994349,
+ 2004522,
+ 2014669,
+ 2024770,
+ 2034948,
+ 2045125,
+ 2055287,
+ 2065451,
+ 2075690,
+ 2085862
+ ],
+ "td_error_abs": [
+ 0.12952986399696714,
+ 0.0301888753847191,
+ 0.035792812206850634,
+ 0.03706536543996711,
+ 0.05770875316372599,
+ 0.045297241639976976,
+ 0.057230818283700934,
+ 0.10106198693649399,
+ 0.04929080067473081,
+ 0.04307936587686867,
+ 0.055586972275071596,
+ 0.052993983268608554,
+ 0.06001297434924032,
+ 0.06365215225724985,
+ 0.06540509944705317,
+ 0.0698320755211977,
+ 0.07843239403753899,
+ 0.06843740451159487,
+ 0.08329120806717602,
+ 0.06600295357082202,
+ 0.07889227470040237,
+ 0.07440505806105496,
+ 0.08197596948933086,
+ 0.0819617791597227,
+ 0.09418431173991687,
+ 0.0882008353099528,
+ 0.10003215762381992,
+ 0.09546727023482608,
+ 0.09992783365772467,
+ 0.09487673026772532,
+ 0.10917875282721462,
+ 0.09720480120591471,
+ 0.11634595440857456,
+ 0.0955118961764534,
+ 0.09455158932108275,
+ 0.10009670645295195,
+ 0.1003352445714614,
+ 0.091682045447828,
+ 0.09287209925216154,
+ 0.08861605494273947,
+ 0.11207234711282564,
+ 0.0942477070796592,
+ 0.09689187418888019,
+ 0.10213218627946236,
+ 0.10938456991444463,
+ 0.09888958937442727,
+ 0.08441272423792824,
+ 0.08881093668870595,
+ 0.0972549473346762,
+ 0.09214361470045752,
+ 0.10304427008763138,
+ 0.08778671092279115,
+ 0.10175343210719363,
+ 0.1006219909020601,
+ 0.10363547540546152,
+ 0.09426748277308787,
+ 0.09842141938788204,
+ 0.09297870769255237,
+ 0.10858514688642673,
+ 0.1039152736135705,
+ 0.10199360755084767,
+ 0.1007244698389753,
+ 0.09983036545387292,
+ 0.09767334048715658,
+ 0.09562354439314254,
+ 0.09870617377294359,
+ 0.10673593326009324,
+ 0.10371345305538561,
+ 0.09291325956507651,
+ 0.10490181931601276,
+ 0.10468296358203226,
+ 0.10017982377326531,
+ 0.09856314014678512,
+ 0.09623045661164628,
+ 0.10055222886900998,
+ 0.10053295388787804,
+ 0.10345025208653785,
+ 0.09560725809573878,
+ 0.09875987637976105,
+ 0.10360373269308816,
+ 0.09096477858724125,
+ 0.09330710889318065,
+ 0.10539326736044542,
+ 0.0894340345102738,
+ 0.09097266285428895,
+ 0.09611799765849721,
+ 0.10780072296807686,
+ 0.11058027845832268,
+ 0.09297848966902923,
+ 0.10436087158802669,
+ 0.09956949596907212,
+ 0.10417399797852706,
+ 0.09528127470143063,
+ 0.11062862604837759,
+ 0.10242107669890874,
+ 0.10427565414186508,
+ 0.10197504374507098,
+ 0.10496865802041896,
+ 0.10466541529853562,
+ 0.09493318519435262,
+ 0.09206228005990343,
+ 0.09002862085121,
+ 0.098120260395981,
+ 0.09116349172510613,
+ 0.1021086138246808,
+ 0.10073334785064274,
+ 0.10630447493554107,
+ 0.09918243550002748,
+ 0.09411199491445958,
+ 0.09550060059776029,
+ 0.09899230369430388,
+ 0.10320275709634617,
+ 0.10207421104705303,
+ 0.09522480114709214,
+ 0.10811486026926521,
+ 0.11149308672602873,
+ 0.094291855224729,
+ 0.09631936691078921,
+ 0.10086491918653756,
+ 0.09439888551363528,
+ 0.11340086744976537,
+ 0.09217720551652395,
+ 0.11165899218934955,
+ 0.09425497565259501,
+ 0.0963606581030972,
+ 0.10219052989908659,
+ 0.09894206521284524,
+ 0.10254630247180535,
+ 0.09414931748584325,
+ 0.09663713998668357,
+ 0.09378801622512747,
+ 0.10463512977943487,
+ 0.09302869526872988,
+ 0.0878915923836044,
+ 0.0939680496009545,
+ 0.09473444898200757,
+ 0.100102613900454,
+ 0.08698989383436255,
+ 0.09070391516467842,
+ 0.09212058060741583,
+ 0.08187764534094627,
+ 0.08952119924726278,
+ 0.0892822265625,
+ 0.09275429991257374,
+ 0.09452533104767841,
+ 0.08582685353729498,
+ 0.09726907452574016,
+ 0.09218023850221625,
+ 0.09381879621818448,
+ 0.09312296256744604,
+ 0.10094583881754389,
+ 0.10164927233780159,
+ 0.08872817369750502,
+ 0.09679811263740301,
+ 0.0930295998696516,
+ 0.1315134161311833,
+ 0.1337193669002411,
+ 0.12029292299013276,
+ 0.09903752287233646,
+ 0.09572371112451376,
+ 0.1412898799249136,
+ 0.09589672918498182,
+ 0.09708320558822854,
+ 0.10266088314992632,
+ 0.09581393062926812,
+ 0.10263115874840177,
+ 0.09127192271515196,
+ 0.10929912127249848,
+ 0.09422168123307006,
+ 0.09837611512568896,
+ 0.09490285389458955,
+ 0.0990202162000868,
+ 0.09099949620157737,
+ 0.09102153699621357,
+ 0.10069850555042445,
+ 0.09744609769133471,
+ 0.09116481719588551,
+ 0.09100459403792441,
+ 0.09865892892335495,
+ 0.08605183193948471,
+ 0.1097052747553045,
+ 0.10269363494201637,
+ 0.09025290995716918,
+ 0.09172568079449822,
+ 0.08693130406956215,
+ 0.09693792801869067,
+ 0.08520610990148486,
+ 0.08427298811504624,
+ 0.09101413920046465,
+ 0.0910593394673848,
+ 0.08361319649259254,
+ 0.07744954379982853,
+ 0.07569767045664548,
+ 0.09109756980786483,
+ 0.0774809530422995,
+ 0.07142316185189958,
+ 0.07914120876315948,
+ 0.08527810523556131,
+ 0.0767289372390636,
+ 0.09350537202480709,
+ 0.07895987528420798,
+ 0.07730248752618303,
+ 0.07760208203631527,
+ 0.09376173838296294,
+ 0.08479758231142369,
+ 0.10398479737686978
+ ],
+ "td_error_abs_T": [
+ 6986,
+ 17208,
+ 27326,
+ 37446,
+ 47446,
+ 57560,
+ 67646,
+ 77674,
+ 87676,
+ 97697,
+ 107788,
+ 117898,
+ 127977,
+ 138187,
+ 148314,
+ 158336,
+ 168474,
+ 178659,
+ 188844,
+ 198937,
+ 209085,
+ 219203,
+ 229205,
+ 239302,
+ 249540,
+ 259654,
+ 269883,
+ 279962,
+ 290103,
+ 300233,
+ 310348,
+ 320404,
+ 330621,
+ 340774,
+ 350782,
+ 360905,
+ 371032,
+ 381218,
+ 391260,
+ 401437,
+ 411519,
+ 421583,
+ 431831,
+ 442080,
+ 452171,
+ 462389,
+ 472545,
+ 482562,
+ 492759,
+ 502876,
+ 513077,
+ 523291,
+ 533582,
+ 543750,
+ 553770,
+ 563908,
+ 573965,
+ 584119,
+ 594318,
+ 604610,
+ 614781,
+ 624841,
+ 634985,
+ 645190,
+ 655224,
+ 665431,
+ 675582,
+ 685795,
+ 695958,
+ 706251,
+ 716474,
+ 726601,
+ 736650,
+ 746776,
+ 757036,
+ 767142,
+ 777302,
+ 787370,
+ 797412,
+ 807474,
+ 817490,
+ 827784,
+ 837975,
+ 848136,
+ 858264,
+ 868484,
+ 878714,
+ 888844,
+ 899094,
+ 909295,
+ 919346,
+ 929608,
+ 939629,
+ 949834,
+ 959847,
+ 969980,
+ 980122,
+ 990167,
+ 1000421,
+ 1010682,
+ 1020899,
+ 1031029,
+ 1041227,
+ 1051318,
+ 1061418,
+ 1071617,
+ 1081746,
+ 1091976,
+ 1102076,
+ 1112333,
+ 1122555,
+ 1132784,
+ 1142944,
+ 1152981,
+ 1163172,
+ 1173184,
+ 1183364,
+ 1193423,
+ 1203440,
+ 1213549,
+ 1223612,
+ 1233681,
+ 1243748,
+ 1253938,
+ 1263975,
+ 1274067,
+ 1284125,
+ 1294253,
+ 1304340,
+ 1314506,
+ 1324563,
+ 1334581,
+ 1344810,
+ 1354992,
+ 1365010,
+ 1375043,
+ 1385083,
+ 1395237,
+ 1405432,
+ 1415708,
+ 1425821,
+ 1435936,
+ 1445973,
+ 1456099,
+ 1466151,
+ 1476185,
+ 1486372,
+ 1496551,
+ 1506712,
+ 1516778,
+ 1527042,
+ 1537198,
+ 1547453,
+ 1557613,
+ 1567733,
+ 1577958,
+ 1588198,
+ 1598438,
+ 1608533,
+ 1618646,
+ 1628879,
+ 1638950,
+ 1649053,
+ 1659258,
+ 1669530,
+ 1679723,
+ 1689942,
+ 1700096,
+ 1710325,
+ 1720334,
+ 1730442,
+ 1740566,
+ 1750671,
+ 1760753,
+ 1770969,
+ 1781219,
+ 1791341,
+ 1801579,
+ 1811774,
+ 1821795,
+ 1831999,
+ 1842201,
+ 1852343,
+ 1862458,
+ 1872561,
+ 1882789,
+ 1893002,
+ 1903009,
+ 1913162,
+ 1923225,
+ 1933463,
+ 1943702,
+ 1953874,
+ 1964002,
+ 1974036,
+ 1984114,
+ 1994349,
+ 2004522,
+ 2014669,
+ 2024770,
+ 2034948,
+ 2045125,
+ 2055287,
+ 2065451,
+ 2075690,
+ 2085862
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.09375,
+ 0.09375,
+ 0.15625,
+ 0.15625,
+ 0.25,
+ 0.125,
+ 0.09375,
+ 0.1875,
+ 0.125,
+ 0.25,
+ 0.09375,
+ 0.21875,
+ 0.15625,
+ 0.09375,
+ 0.09375,
+ 0.0625,
+ 0.21875,
+ 0.15625,
+ 0.3125,
+ 0.125,
+ 0.28125,
+ 0.28125,
+ 0.21875,
+ 0.25,
+ 0.21875,
+ 0.21875,
+ 0.3125,
+ 0.3125,
+ 0.1875,
+ 0.21875,
+ 0.28125,
+ 0.15625,
+ 0.1875,
+ 0.375,
+ 0.46875,
+ 0.28125,
+ 0.21875,
+ 0.1875,
+ 0.25,
+ 0.3125,
+ 0.40625,
+ 0.28125,
+ 0.25,
+ 0.46875,
+ 0.375,
+ 0.40625,
+ 0.5,
+ 0.40625,
+ 0.3125,
+ 0.375,
+ 0.34375,
+ 0.40625,
+ 0.46875,
+ 0.28125,
+ 0.375,
+ 0.21875,
+ 0.40625,
+ 0.375,
+ 0.34375,
+ 0.28125,
+ 0.40625,
+ 0.46875,
+ 0.46875,
+ 0.21875,
+ 0.375,
+ 0.4375,
+ 0.21875,
+ 0.28125,
+ 0.375,
+ 0.5,
+ 0.375,
+ 0.375,
+ 0.28125,
+ 0.375,
+ 0.375,
+ 0.34375,
+ 0.34375,
+ 0.46875,
+ 0.5,
+ 0.4375,
+ 0.375,
+ 0.3125,
+ 0.4375,
+ 0.40625,
+ 0.34375,
+ 0.53125,
+ 0.40625,
+ 0.375,
+ 0.46875,
+ 0.46875,
+ 0.46875,
+ 0.4375,
+ 0.5,
+ 0.5625,
+ 0.5,
+ 0.625,
+ 0.5625,
+ 0.46875,
+ 0.53125,
+ 0.625,
+ 0.6875,
+ 0.5,
+ 0.59375,
+ 0.59375,
+ 0.5625,
+ 0.375,
+ 0.4375,
+ 0.34375,
+ 0.5625,
+ 0.53125,
+ 0.59375,
+ 0.625,
+ 0.5,
+ 0.5625,
+ 0.53125,
+ 0.46875,
+ 0.5625,
+ 0.4375,
+ 0.5625,
+ 0.53125,
+ 0.375,
+ 0.75,
+ 0.53125,
+ 0.65625,
+ 0.5625,
+ 0.71875,
+ 0.78125,
+ 0.59375,
+ 0.59375,
+ 0.375,
+ 0.46875,
+ 0.5625,
+ 0.53125,
+ 0.5,
+ 0.78125,
+ 0.46875,
+ 0.75,
+ 0.46875,
+ 0.5625,
+ 0.65625,
+ 0.59375,
+ 0.65625,
+ 0.5,
+ 0.65625,
+ 0.5625,
+ 0.6875,
+ 0.71875,
+ 0.59375,
+ 0.6875,
+ 0.65625,
+ 0.75,
+ 0.53125,
+ 0.40625,
+ 0.71875,
+ 0.59375,
+ 0.5625,
+ 0.625,
+ 0.53125,
+ 0.6875,
+ 0.53125,
+ 0.75,
+ 0.6875,
+ 0.53125,
+ 0.5,
+ 0.71875,
+ 0.53125,
+ 0.6875,
+ 0.78125,
+ 0.6875,
+ 0.625,
+ 0.59375,
+ 0.65625,
+ 0.625,
+ 0.84375,
+ 0.6875,
+ 0.75,
+ 0.625,
+ 0.5625,
+ 0.75,
+ 0.6875,
+ 0.53125,
+ 0.59375,
+ 0.71875,
+ 0.78125,
+ 0.59375,
+ 0.625,
+ 0.90625,
+ 0.6875,
+ 0.6875,
+ 0.625,
+ 0.65625,
+ 0.65625,
+ 0.5,
+ 0.65625,
+ 0.65625,
+ 0.75,
+ 0.6875,
+ 0.625,
+ 0.53125,
+ 0.59375,
+ 0.46875,
+ 0.5625,
+ 0.46875,
+ 0.8125,
+ 0.5625,
+ 0.625
+ ],
+ "test_battle_won_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_dead_allies_mean": [
+ 5.0,
+ 4.8125,
+ 4.78125,
+ 4.46875,
+ 4.625,
+ 4.40625,
+ 4.71875,
+ 4.875,
+ 4.625,
+ 4.8125,
+ 4.375,
+ 4.75,
+ 4.5,
+ 4.59375,
+ 4.8125,
+ 4.71875,
+ 4.875,
+ 4.5,
+ 4.625,
+ 4.21875,
+ 4.6875,
+ 4.4375,
+ 4.3125,
+ 4.59375,
+ 4.375,
+ 4.40625,
+ 4.40625,
+ 4.28125,
+ 4.3125,
+ 4.59375,
+ 4.59375,
+ 4.28125,
+ 4.625,
+ 4.625,
+ 4.15625,
+ 3.84375,
+ 4.25,
+ 4.5,
+ 4.59375,
+ 4.5,
+ 4.1875,
+ 4.09375,
+ 4.40625,
+ 4.1875,
+ 4.03125,
+ 4.09375,
+ 4.0625,
+ 3.75,
+ 3.90625,
+ 4.15625,
+ 4.25,
+ 4.21875,
+ 4.09375,
+ 3.8125,
+ 4.40625,
+ 4.09375,
+ 4.5625,
+ 4.125,
+ 4.15625,
+ 4.09375,
+ 4.4375,
+ 4.125,
+ 3.9375,
+ 3.875,
+ 4.5,
+ 4.3125,
+ 4.03125,
+ 4.40625,
+ 4.34375,
+ 4.28125,
+ 3.78125,
+ 4.1875,
+ 4.21875,
+ 4.3125,
+ 4.1875,
+ 4.03125,
+ 4.28125,
+ 4.125,
+ 3.875,
+ 4.0,
+ 4.0,
+ 4.0625,
+ 4.3125,
+ 3.9375,
+ 3.9375,
+ 4.25,
+ 3.5,
+ 4.125,
+ 4.15625,
+ 3.96875,
+ 4.125,
+ 4.0,
+ 3.90625,
+ 3.875,
+ 3.53125,
+ 4.15625,
+ 3.4375,
+ 3.78125,
+ 4.0,
+ 3.6875,
+ 3.375,
+ 3.46875,
+ 3.78125,
+ 3.625,
+ 3.5,
+ 3.5,
+ 4.25,
+ 4.0,
+ 4.09375,
+ 3.5,
+ 3.78125,
+ 3.65625,
+ 3.4375,
+ 3.71875,
+ 3.78125,
+ 3.96875,
+ 3.9375,
+ 3.59375,
+ 3.96875,
+ 3.59375,
+ 3.78125,
+ 4.25,
+ 3.0625,
+ 3.78125,
+ 3.65625,
+ 3.625,
+ 3.1875,
+ 3.375,
+ 3.71875,
+ 3.6875,
+ 4.21875,
+ 3.90625,
+ 3.5625,
+ 3.65625,
+ 3.71875,
+ 3.28125,
+ 3.84375,
+ 3.09375,
+ 3.9375,
+ 3.78125,
+ 3.4375,
+ 3.65625,
+ 3.28125,
+ 3.6875,
+ 3.375,
+ 3.53125,
+ 3.40625,
+ 3.1875,
+ 3.5625,
+ 3.5625,
+ 3.3125,
+ 3.0625,
+ 3.625,
+ 3.84375,
+ 3.1875,
+ 3.46875,
+ 3.59375,
+ 3.21875,
+ 3.59375,
+ 3.40625,
+ 3.6875,
+ 3.375,
+ 3.46875,
+ 3.8125,
+ 3.8125,
+ 3.5,
+ 3.65625,
+ 3.28125,
+ 3.28125,
+ 3.375,
+ 3.59375,
+ 3.59375,
+ 3.5625,
+ 3.53125,
+ 3.0,
+ 3.125,
+ 3.125,
+ 3.53125,
+ 3.40625,
+ 3.1875,
+ 3.1875,
+ 3.875,
+ 3.625,
+ 3.53125,
+ 2.90625,
+ 3.53125,
+ 3.375,
+ 2.8125,
+ 3.28125,
+ 3.5,
+ 3.625,
+ 3.4375,
+ 3.59375,
+ 3.71875,
+ 3.40625,
+ 3.375,
+ 3.28125,
+ 3.15625,
+ 3.4375,
+ 3.84375,
+ 3.5625,
+ 3.75,
+ 3.6875,
+ 3.875,
+ 3.125,
+ 3.625,
+ 3.46875
+ ],
+ "test_dead_allies_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_dead_enemies_mean": [
+ 0.5625,
+ 2.0,
+ 2.0625,
+ 2.1875,
+ 2.46875,
+ 2.6875,
+ 2.5625,
+ 2.0,
+ 2.5,
+ 2.5,
+ 2.96875,
+ 1.875,
+ 2.46875,
+ 1.96875,
+ 2.28125,
+ 1.9375,
+ 2.21875,
+ 2.65625,
+ 2.59375,
+ 3.0,
+ 2.3125,
+ 2.875,
+ 2.65625,
+ 2.6875,
+ 2.71875,
+ 2.8125,
+ 2.75,
+ 2.96875,
+ 3.25,
+ 2.375,
+ 2.84375,
+ 2.84375,
+ 2.75,
+ 3.09375,
+ 3.375,
+ 3.78125,
+ 3.3125,
+ 2.71875,
+ 2.8125,
+ 3.125,
+ 3.34375,
+ 3.6875,
+ 3.03125,
+ 2.8125,
+ 3.59375,
+ 3.4375,
+ 3.6875,
+ 3.5,
+ 3.28125,
+ 3.5,
+ 3.5,
+ 3.5625,
+ 3.625,
+ 4.03125,
+ 3.4375,
+ 3.5625,
+ 3.15625,
+ 3.59375,
+ 3.53125,
+ 3.5625,
+ 3.40625,
+ 3.46875,
+ 3.65625,
+ 3.875,
+ 3.125,
+ 3.46875,
+ 3.6875,
+ 3.28125,
+ 3.59375,
+ 3.71875,
+ 3.8125,
+ 3.65625,
+ 3.65625,
+ 3.3125,
+ 3.5625,
+ 3.28125,
+ 3.59375,
+ 3.65625,
+ 3.875,
+ 3.71875,
+ 3.9375,
+ 3.59375,
+ 3.4375,
+ 3.8125,
+ 3.84375,
+ 3.65625,
+ 3.96875,
+ 3.875,
+ 3.71875,
+ 3.84375,
+ 3.84375,
+ 3.8125,
+ 3.78125,
+ 4.1875,
+ 4.15625,
+ 4.03125,
+ 4.375,
+ 4.3125,
+ 4.03125,
+ 3.84375,
+ 4.125,
+ 4.4375,
+ 3.9375,
+ 4.125,
+ 4.3125,
+ 4.3125,
+ 3.9375,
+ 3.71875,
+ 3.625,
+ 4.125,
+ 4.25,
+ 4.3125,
+ 4.21875,
+ 3.84375,
+ 4.25,
+ 4.0625,
+ 3.96875,
+ 4.15625,
+ 3.90625,
+ 4.21875,
+ 4.0625,
+ 3.8125,
+ 4.4375,
+ 4.15625,
+ 4.28125,
+ 4.09375,
+ 4.4375,
+ 4.5,
+ 4.09375,
+ 4.1875,
+ 3.6875,
+ 3.90625,
+ 4.125,
+ 4.09375,
+ 4.09375,
+ 4.65625,
+ 4.0,
+ 4.4375,
+ 3.875,
+ 4.25,
+ 4.40625,
+ 4.40625,
+ 4.40625,
+ 3.96875,
+ 4.40625,
+ 4.0625,
+ 4.375,
+ 4.375,
+ 4.15625,
+ 4.28125,
+ 4.4375,
+ 4.4375,
+ 4.03125,
+ 3.78125,
+ 4.59375,
+ 4.1875,
+ 4.125,
+ 4.21875,
+ 4.25,
+ 4.28125,
+ 3.71875,
+ 4.5,
+ 4.28125,
+ 4.03125,
+ 3.71875,
+ 4.4375,
+ 4.1875,
+ 4.34375,
+ 4.5,
+ 4.375,
+ 4.0625,
+ 4.25,
+ 4.40625,
+ 4.28125,
+ 4.71875,
+ 4.25,
+ 4.59375,
+ 4.25,
+ 4.3125,
+ 4.40625,
+ 4.53125,
+ 4.1875,
+ 4.0,
+ 4.46875,
+ 4.53125,
+ 4.21875,
+ 4.4375,
+ 4.8125,
+ 4.40625,
+ 4.34375,
+ 4.4375,
+ 4.4375,
+ 4.34375,
+ 4.125,
+ 4.34375,
+ 4.375,
+ 4.46875,
+ 4.5,
+ 4.125,
+ 3.9375,
+ 4.15625,
+ 3.96875,
+ 4.1875,
+ 3.9375,
+ 4.5625,
+ 4.15625,
+ 4.3125
+ ],
+ "test_dead_enemies_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_ep_length_mean": [
+ 59.03125,
+ 64.84375,
+ 65.75,
+ 62.84375,
+ 61.53125,
+ 60.59375,
+ 59.6875,
+ 55.78125,
+ 59.875,
+ 58.78125,
+ 58.09375,
+ 55.46875,
+ 57.03125,
+ 53.09375,
+ 59.75,
+ 56.3125,
+ 56.625,
+ 55.4375,
+ 56.125,
+ 57.65625,
+ 53.59375,
+ 57.25,
+ 53.78125,
+ 57.5625,
+ 55.75,
+ 57.125,
+ 56.40625,
+ 56.59375,
+ 60.09375,
+ 56.0,
+ 59.34375,
+ 56.90625,
+ 55.6875,
+ 61.59375,
+ 61.15625,
+ 59.09375,
+ 58.0,
+ 57.03125,
+ 58.28125,
+ 59.96875,
+ 58.53125,
+ 62.28125,
+ 59.40625,
+ 56.40625,
+ 60.90625,
+ 59.53125,
+ 66.59375,
+ 59.84375,
+ 56.0625,
+ 60.90625,
+ 63.6875,
+ 60.15625,
+ 62.8125,
+ 64.0625,
+ 59.46875,
+ 58.90625,
+ 57.09375,
+ 62.09375,
+ 62.65625,
+ 60.6875,
+ 65.65625,
+ 62.09375,
+ 59.28125,
+ 60.9375,
+ 61.75,
+ 62.84375,
+ 59.78125,
+ 59.46875,
+ 66.40625,
+ 62.59375,
+ 58.9375,
+ 62.625,
+ 58.25,
+ 59.25,
+ 62.96875,
+ 58.125,
+ 62.4375,
+ 60.84375,
+ 62.65625,
+ 60.3125,
+ 65.28125,
+ 59.9375,
+ 60.46875,
+ 60.875,
+ 62.1875,
+ 64.15625,
+ 58.59375,
+ 63.6875,
+ 62.125,
+ 62.4375,
+ 61.96875,
+ 62.6875,
+ 62.96875,
+ 65.75,
+ 65.3125,
+ 66.78125,
+ 64.84375,
+ 69.625,
+ 65.4375,
+ 60.65625,
+ 61.28125,
+ 64.03125,
+ 64.59375,
+ 65.34375,
+ 64.34375,
+ 68.0,
+ 63.9375,
+ 62.5,
+ 62.4375,
+ 66.875,
+ 67.125,
+ 67.59375,
+ 64.34375,
+ 63.03125,
+ 66.21875,
+ 67.625,
+ 67.78125,
+ 64.75,
+ 68.15625,
+ 61.84375,
+ 67.3125,
+ 64.84375,
+ 65.09375,
+ 66.5,
+ 66.71875,
+ 60.9375,
+ 63.28125,
+ 64.1875,
+ 62.6875,
+ 66.46875,
+ 64.28125,
+ 66.84375,
+ 64.625,
+ 65.09375,
+ 65.75,
+ 66.78125,
+ 65.75,
+ 66.09375,
+ 68.21875,
+ 66.90625,
+ 65.90625,
+ 67.125,
+ 65.09375,
+ 65.125,
+ 63.84375,
+ 64.8125,
+ 65.34375,
+ 63.53125,
+ 63.03125,
+ 65.03125,
+ 64.84375,
+ 65.46875,
+ 62.5625,
+ 61.65625,
+ 64.75,
+ 67.46875,
+ 66.28125,
+ 62.1875,
+ 65.71875,
+ 65.53125,
+ 64.8125,
+ 67.0625,
+ 64.0625,
+ 67.625,
+ 62.5,
+ 65.9375,
+ 67.78125,
+ 66.71875,
+ 66.5625,
+ 63.8125,
+ 64.46875,
+ 64.46875,
+ 70.28125,
+ 70.96875,
+ 69.9375,
+ 65.25,
+ 67.0625,
+ 64.625,
+ 65.8125,
+ 67.1875,
+ 65.875,
+ 68.15625,
+ 61.65625,
+ 65.4375,
+ 65.1875,
+ 66.0,
+ 67.40625,
+ 66.1875,
+ 66.84375,
+ 66.96875,
+ 73.34375,
+ 65.59375,
+ 66.0625,
+ 66.75,
+ 66.09375,
+ 69.3125,
+ 63.34375,
+ 68.71875,
+ 60.59375,
+ 63.53125,
+ 67.46875,
+ 64.90625,
+ 66.4375,
+ 64.4375,
+ 67.21875,
+ 66.75,
+ 68.15625
+ ],
+ "test_ep_length_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.269607843137255
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.254132231404963
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.921568627450974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.72549019607842
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.78676470588235
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.067961165048562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.980198019801975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 21.305825242718466
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.1980198019802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.117647058823522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.792079207920793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.137254901960787
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.999999999999993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.921568627450974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188112
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.31372549019607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.725490196078415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960784313725483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.17821782178217
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.842233009708742
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96039603960396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.222772277227726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.960396039603957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.64705882352941
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.11764705882352
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 25.009900990099
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.921568627450974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.073529411764703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.764705882352924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.11764705882352
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.27184466019419
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.995049504950497
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.94117647058823
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.88349514563107
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.88118811881189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.12990196078431
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.529411764705884
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.138613861386133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.475247524752476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.163366336633665
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.611570247933898
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.470588235294105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.450980392156854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.45631067961164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980588
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96813725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233011
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158417
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274503
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.982673267326724
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.2156862745098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529402
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.36881188118812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.813725490196063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168324
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.31372549019607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 26.44628099173554
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.37864077669903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.40048543689321
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.554455445544555
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.158415841584166
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811856
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.68316831683168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.23019801980199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.137254901960794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.72277227722772
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.73300970873787
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.56435643564356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.685643564356432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.37254901960783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.766990291262147
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.26213592233013
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 25.01960784313726
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.41421568627451
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.184466019417485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.262135922330117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.450980392156865
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.2621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.1188118811881
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.163366336633665
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.80147058823528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.117647058823522
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188122
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811878
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529406
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.17821782178218
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.51732673267326
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.592233009708735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.76470588235293
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.37376237623762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 25.087378640776706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.316831683168317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.259803921568626
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.039215686274513
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.475247524752476
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.862745098039206
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.666666666666654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.689320388349515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490197
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.92079207920792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.806930693069297
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.537128712871294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.57281553398059
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.357843137254896
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.188725490196077
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.11881188118811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.35643564356436
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.6470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.84313725490195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.15841584158416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529413
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207907
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.56862745098038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 32.079207920792065
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.843137254901947
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.198019801980205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 24.356435643564353
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.881188118811874
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.378640776699044
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.920792079207917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.862745098039213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.118811881188115
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.96039603960395
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.722772277227712
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 31.568627450980372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.572815533980595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.96078431372549
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 23.98514851485149
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.647058823529413
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 28.31683168316831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.76699029126214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 27.74257425742573
+ }
+ ],
+ "test_return_max_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.322554031555419
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.764063392712318
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.647921427251397
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.397873197043369
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.610038942190844
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.57199398904995
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.70523614299657
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.093158023582168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.427101002115165
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.330960092331953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.779171476428704
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.47677780368651
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.035166243665898
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.731271840723577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.560126550168485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.738185042285444
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.49590750984592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.437499173380719
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.533448952052321
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.956310471268301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 13.492552918722026
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.335390494652756
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.521440928809245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.849043103216953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.078829202048723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.855648469384445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.305423937280093
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.195482494301364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.413085040129776
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.181057262429718
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.954879985939707
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.10166307452792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.077210206633593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.919021951440545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.803846480198938
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.19122041996104
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.876553694499691
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.509998217746759
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.44845855354593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.819169301168346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.934820635682769
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.07198920488056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.869708447371139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.829245423873129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.89053906238354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.882736301522494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.923819192126693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.779813750789312
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.732110149322914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.841483705522961
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.32515431721869
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.491414734004135
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.939410368398967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.46866353568376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.718122968292105
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.429816443237698
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.967129379214661
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.437704779487415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.36241505284199
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.242906176435326
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.300624062912494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.771906356842656
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.982703467437855
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.326235406609428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.488086948163012
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.232849764569288
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.552311652698748
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 14.857829324772208
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.952952203529245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.338408259745684
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.896606028928137
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.672756609454783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.99561546078061
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.163673444895144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.526086575246424
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.537852556027012
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.583247918887473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.61117962946287
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.41506752958569
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.47290316163645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.693774839480504
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.592807785447956
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 15.565895424960408
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.057606964817666
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.99733639024921
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.35864920119144
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.484508169492315
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.850224734703655
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.68865401467494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.528898231081026
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.236473331340814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.74760998608103
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.549070923285186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.761962523343023
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.193213585918457
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.399121331861686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.730189434461693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.811255491049664
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.673336092816687
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.482012142699816
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.742771020196876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.998839983466674
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.94722596768886
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.405664127908494
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.45821474769307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.21467762775739
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.251714254327737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.48543408507139
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.82134522391774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.769387697049876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.642959488042774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.53414522841328
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.67822293732946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.19605151181734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.945494807490782
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.883193619012104
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.28283911128737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.39391165591853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.546837750594023
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.967533154714346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.289275104045892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.66631781580467
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.27089893739322
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.29655079559667
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.486888444576575
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.982582924866946
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.09201669387758
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.52729881002271
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.474270119097767
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.406140222160097
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.453802994962064
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.825271029411688
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.96589086652471
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.26983647392243
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.214822598586245
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.68648701558039
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.08999119563633
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.309590263490357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.365205838562872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.18177099643016
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.886588479547004
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.097363325819597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.249226981363478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.265232713200128
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.79448779682999
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.46555247390264
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.501930637821815
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.78379736529592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.44514105234556
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.562666361314175
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.753631001294703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.532350707436375
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.442919063506025
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.65550912763516
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.241598502210593
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.05983746342435
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.7027046740428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.407580173542087
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.628597454181715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.4809031000082
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.68575171408797
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.05847350302839
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.164058805799783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.43144265546207
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.157749339520425
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.630825103401
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.632982663215248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.324371058421264
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.548995879579913
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.574566458320206
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.665827643955893
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.462879238798546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.094538081422723
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.51715846389783
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.969863191425965
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.53357368169177
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.918473275087134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.00671806093601
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.007989985108885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.81584242238749
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.84318388754729
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.185215333095535
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.409352714660518
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.116000976217116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.299306628617117
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.87426375579043
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.450435346600116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 20.66627659144607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.677528892079962
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.020731187346477
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.40604189310376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.44335161065056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.33056555300214
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.40472879824017
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.420306285855307
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.479363802983315
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.185665570014443
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.991909816160085
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.71326038075032
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 17.37328861440931
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.137630714545615
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.779193162148236
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.078570501610415
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 16.910910544508
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 19.89257902369367
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.41451223833088
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 18.796608294589067
+ }
+ ],
+ "test_return_mean_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.871287128712872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.76470588235294
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.651960784313725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.058823529411764
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.03305785123967
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.283057851239667
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.888429752066116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.311983471074379
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.607438016528923
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.37603305785124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.039603960396043
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.5396039603960405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.3801652892561975
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.440082644628098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.692148760330577
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.3801652892562
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.31404958677686
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.544554455445545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.7727272727272725
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.993801652892561
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.117768595041321
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.504854368932038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.059405940594063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.728155339805823
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.892561983471072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.097087378640774
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.817961165048543
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.68595041322314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.407766990291261
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.919117647058822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.568181818181819
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.355371900826446
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.634297520661157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.958677685950416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.776859504132231
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.264462809917356
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.377450980392156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.836776859504133
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.3651960784313735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.688016528925619
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.04950495049505
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.909090909090912
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.16504854368932
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.152892561983469
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.905339805825241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.099173553719007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.685950413223141
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.347107438016528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.141089108910892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.024793388429755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.256198347107439
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.411764705882354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.489669421487603
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.929611650485434
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.272727272727272
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.991735537190081
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.975206611570247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.157024793388431
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.520661157024794
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2438016528925635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.107438016528928
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.190594059405942
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.428217821782182
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.940082644628102
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.876213592233007
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.900826446280993
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.32231404958678
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.876033057851241
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.85123966942149
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.487603305785124
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.142561983471076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.883495145631068
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.138429752066116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.239669421487605
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.506198347107441
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.347107438016527
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.173267326732675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.526859504132231
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.566115702479339
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.509803921568627
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.609504132231404
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.603305785123966
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.766990291262134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.67892156862745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.681372549019607
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.941176470588234
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.839108910891092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.759708737864077
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.822314049586776
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.611570247933885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.045454545454542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.99029126213592
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.171487603305787
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.862745098039216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.453431372549018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.183884297520665
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.836776859504134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.946078431372548
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.018595041322317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.157024793388432
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.735537190082647
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.862745098039215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.65909090909091
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.627450980392156
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.152892561983471
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.079207920792081
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.933884297520663
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.445544554455449
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.570388349514563
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.090686274509807
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.681372549019611
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.57281553398058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.074380165289258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.198347107438018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.05392156862745
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.79126213592233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.91504854368932
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.63636363636364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.694214876033058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.181818181818183
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.22314049586777
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.019607843137257
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.3960396039604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.941176470588237
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.710743801652892
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.138429752066116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.19834710743802
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.082644628099176
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.769801980198022
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.93388429752066
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6694214876033056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.727272727272727
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.499999999999998
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.031553398058259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.213235294117647
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.3921568627451
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.059405940594063
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.750000000000002
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.831683168316834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.420792079207924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.239669421487601
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.487745098039216
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.413366336633667
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.2970297029703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.702479338842974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.619834710743803
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.574380165289254
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.242718446601943
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.078431372549018
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.458677685950416
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.551652892561986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.665289256198347
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.820247933884298
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.076446280991735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.933884297520663
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.617768595041323
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.555825242718444
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.900990099009903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.92156862745098
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.912621359223301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.37864077669903
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.983471074380168
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.601941747572814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.0776699029126195
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.434466019417478
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.213592233009708
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.873966942148762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.888429752066116
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.532178217821786
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.396039603960398
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.803921568627452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.29611650485437
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.1322314049586755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.347107438016526
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.983471074380164
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.095041322314046
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.500000000000002
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.074380165289258
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.857438016528924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 12.297520661157021
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.072815533980584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.338842975206608
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.917355371900825
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.502475247524755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.140495867768597
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.601485148514854
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.504854368932037
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.580097087378645
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.378712871287131
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.675619834710744
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.719008264462808
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.121287128712872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.963235294117649
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.289256198347108
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.15702479338843
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.766990291262138
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.090909090909092
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.87809917355372
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.324257425742578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.942148760330581
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.629901960784311
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.11029411764706
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.029126213592232
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.442148760330578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.433168316831683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.1735537190082646
+ }
+ ],
+ "test_return_min_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.7887253496087157
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.548982550716969
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.706557801317627
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.031947653536737
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3267270635649835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5050183754233055
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.397411713949605
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.32923868863984
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.685160203196475
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9319623598360858
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.263979948793469
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.4033976982698233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.973935577394747
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.202261730053202
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.543107688016093
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.1575470087759605
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.991856863068914
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.817698378005379
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.088751635405545
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.508815278580718
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.524363919610591
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.505377160169532
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.252978878275911
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.062346642638641
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.616147042300386
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.48589974326126
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.7868483212036015
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.92986433805288
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.114863893871632
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.030873675335763
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.735670853692473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.293515619889053
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.154439207634537
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.318539229825529
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.739619004976238
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.3085470863243485
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.433628867159949
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.31140241535735
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.281379178580925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.029843239374062
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.22274535791221
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.524179919043515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5247789936085985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.530056497149578
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.171074189811277
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.020767700169953
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.043847602435046
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.456086134658481
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.221488862390002
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.023560652933161
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.876236546375061
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.159481567983027
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.749651640878515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.352146315840215
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.433248793129411
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.395553603393884
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.346957106807861
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.530157526284428
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4296095406069185
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.5159753849600754
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.604987221065633
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.332322329616417
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.15375108672153
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.219987205554595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.111405416538602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.193427457594721
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019206397135407
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.300932228442954
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.128071645315775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0259725452310455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.780576604564076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.235971016807491
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.795659414411142
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.785268174722414
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.284396916955188
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.933677629362535
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.789347606563887
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.203047072959643
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2569012409890705
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.710847136982319
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.152308661083073
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.813160501768834
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5031993957638194
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9297949430145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.453830584350644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.895804112979464
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8131460572603935
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.326583347968547
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.744297172623405
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.15616581750134
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.100191265612072
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.55705373768508
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.815629832493274
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.696025936844917
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.374208552680314
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.770916112411546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.099374411931822
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.290652730916762
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.137560223052876
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.019768508295263
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.562455446746872
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.747871546898047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.489948958264957
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.805360191330031
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.633261936357058
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.364273628883657
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.826036945124651
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.351788177131572
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.339350503673354
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.082950907529526
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9044785280970316
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0461136393508434
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.713464519414145
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.785590865899335
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.018935837743693
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.342983405037694
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.134545236707715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.043332604238831
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.414978115275881
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8586050845774835
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.802214851087273
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.067814949610986
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.045790238382634
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.188422399674452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.376936759968248
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.5040975163159445
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.842070330617444
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.979454859318479
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.317946647426283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.943245763020811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.671721119340094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.872581609954212
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.4368109961030076
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.1059777028090885
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.149882648175974
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5674999298580636
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.856087311257129
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.662960086688956
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.15059079046075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.236821895886886
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.480388987870922
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.760207742170542
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.875271720244496
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.832152356768968
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.3279588897519155
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.633432692392787
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.34072219798233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.424821300759142
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.621035598301123
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.713925781389474
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5144376574533824
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.266464042750944
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.881032671427392
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.74134757455559
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.998679372810552
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.852027313905084
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.6597627152771635
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9501328070993535
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.720513548551851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.604029554950303
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.34157014189653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9571095396566482
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.309020041689364
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.497577085901812
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.585056942273232
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.372036232972213
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.805050029608651
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.927458566773398
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.002710979057798
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.67188277946813
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.009563800789189
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.956715143526138
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.222887244692317
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.344259522605457
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.379570411426172
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9165146594314595
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.415006427274377
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.542121185065482
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.446114278334573
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.110350979043952
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.425656863851396
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.099438175934109
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.889122262250047
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.2050771946439385
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5858585071469653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.393691666653346
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.0090318089160455
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.101260820709894
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.837598541648653
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.382953741690603
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.9745075983624853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.6892844981734734
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.944348786697512
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.547605634152619
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.44337954360806
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.648721111676332
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7864714187232376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.337542690190621
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.8723508180958985
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.729489889715023
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.270015851390853
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.7133909307526167
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.5799690964381616
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.0318117946613095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.094222498441259
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.9190877650738605
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.31301121005741
+ }
+ ],
+ "test_return_std_T": [
+ 204,
+ 10383,
+ 20499,
+ 30544,
+ 40732,
+ 50928,
+ 61090,
+ 71149,
+ 81348,
+ 91439,
+ 101594,
+ 111791,
+ 121859,
+ 132012,
+ 142192,
+ 152334,
+ 162438,
+ 172608,
+ 182793,
+ 192903,
+ 203004,
+ 213200,
+ 223268,
+ 233435,
+ 243539,
+ 253728,
+ 263840,
+ 273903,
+ 284085,
+ 294331,
+ 304469,
+ 314577,
+ 324715,
+ 334829,
+ 344974,
+ 355096,
+ 365159,
+ 375218,
+ 385451,
+ 395480,
+ 405603,
+ 415734,
+ 425903,
+ 436024,
+ 446044,
+ 456086,
+ 466137,
+ 476185,
+ 486421,
+ 496587,
+ 506781,
+ 516860,
+ 526869,
+ 537096,
+ 547346,
+ 557559,
+ 567625,
+ 577737,
+ 587967,
+ 598121,
+ 608154,
+ 618172,
+ 628277,
+ 638573,
+ 648758,
+ 658896,
+ 668985,
+ 678986,
+ 689111,
+ 699143,
+ 709193,
+ 719215,
+ 729432,
+ 739537,
+ 749704,
+ 759809,
+ 769880,
+ 779967,
+ 790179,
+ 800377,
+ 810400,
+ 820450,
+ 830717,
+ 840907,
+ 850964,
+ 861069,
+ 871222,
+ 881356,
+ 891470,
+ 901573,
+ 911612,
+ 921754,
+ 931892,
+ 941893,
+ 951969,
+ 962031,
+ 972155,
+ 982347,
+ 992531,
+ 1002678,
+ 1012722,
+ 1022802,
+ 1032827,
+ 1043047,
+ 1053166,
+ 1063204,
+ 1073415,
+ 1083590,
+ 1093731,
+ 1103844,
+ 1113914,
+ 1124075,
+ 1134341,
+ 1144490,
+ 1154545,
+ 1164755,
+ 1175011,
+ 1185189,
+ 1195359,
+ 1205508,
+ 1215725,
+ 1226005,
+ 1236238,
+ 1246442,
+ 1256533,
+ 1266619,
+ 1276736,
+ 1286779,
+ 1297019,
+ 1307046,
+ 1317312,
+ 1327418,
+ 1337430,
+ 1347674,
+ 1357706,
+ 1367819,
+ 1377885,
+ 1388063,
+ 1398132,
+ 1408194,
+ 1418235,
+ 1428421,
+ 1438473,
+ 1448486,
+ 1458511,
+ 1468621,
+ 1478707,
+ 1488809,
+ 1498973,
+ 1509180,
+ 1519321,
+ 1529401,
+ 1539490,
+ 1549607,
+ 1559754,
+ 1569821,
+ 1580005,
+ 1590221,
+ 1600450,
+ 1610609,
+ 1620717,
+ 1630961,
+ 1641221,
+ 1651225,
+ 1661334,
+ 1671558,
+ 1681608,
+ 1691630,
+ 1701665,
+ 1711681,
+ 1721915,
+ 1732035,
+ 1742290,
+ 1752325,
+ 1762529,
+ 1772769,
+ 1782970,
+ 1792991,
+ 1803129,
+ 1813298,
+ 1823322,
+ 1833470,
+ 1843699,
+ 1853838,
+ 1863988,
+ 1874138,
+ 1884357,
+ 1894568,
+ 1904646,
+ 1914776,
+ 1925021,
+ 1935060,
+ 1945284,
+ 1955531,
+ 1965755,
+ 1975880,
+ 1985931,
+ 1996175,
+ 2006263,
+ 2016287,
+ 2026501,
+ 2036692,
+ 2046729,
+ 2056938,
+ 2067110,
+ 2077359,
+ 2087531
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/6/metrics.json b/results/sacred/10gen_protoss/qmix/6/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/6/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/6/run.json b/results/sacred/10gen_protoss/qmix/6/run.json
new file mode 100644
index 0000000..6ab322a
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/6/run.json
@@ -0,0 +1,116 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_654daaa6534bcee62784d639ea63e51d.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2025-01-05T19:43:03.753377",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": false,
+ "runner": "parallel",
+ "t_max": 4050000,
+ "td_lambda": 0.6,
+ "use_tensorboard": false
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "obs_agent_id=True",
+ "obs_last_action=False",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=4050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6",
+ "use_tensorboard=False"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2025-01-05T16:24:03.185725",
+ "status": "INTERRUPTED",
+ "stop_time": "2025-01-05T19:43:04.003168"
+}
\ No newline at end of file
diff --git a/results/sacred/10gen_protoss/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py b/results/sacred/10gen_protoss/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
new file mode 100644
index 0000000..5393b7f
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+import logging
+import numpy as np
+import torch as th
+
+class Logger:
+ def __init__(self, console_logger):
+ self.console_logger = console_logger
+
+ self.use_tb = False
+ self.use_sacred = False
+ self.use_hdf = False
+
+ self.stats = defaultdict(lambda: [])
+
+ def setup_tb(self, directory_name):
+ # Import here so it doesn't have to be installed if you don't use it
+ from tensorboard_logger import configure, log_value
+ configure(directory_name)
+ self.tb_logger = log_value
+ self.use_tb = True
+
+ def setup_sacred(self, sacred_run_dict):
+ self.sacred_info = sacred_run_dict.info
+ self.use_sacred = True
+
+ def log_stat(self, key, value, t, to_sacred=True):
+ self.stats[key].append((t, value))
+
+ if self.use_tb:
+ self.tb_logger(key, value, t)
+
+ if self.use_sacred and to_sacred:
+ if key in self.sacred_info:
+ self.sacred_info["{}_T".format(key)].append(t)
+ self.sacred_info[key].append(value)
+ else:
+ self.sacred_info["{}_T".format(key)] = [t]
+ self.sacred_info[key] = [value]
+
+ def print_recent_stats(self):
+ log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
+ i = 0
+ for (k, v) in sorted(self.stats.items()):
+ if k == "episode":
+ continue
+ i += 1
+ window = 5 if k != "epsilon" else 1
+ item = "{:.4f}".format(th.mean(th.tensor([float(x[1]) for x in self.stats[k][-window:]])))
+ log_str += "{:<25}{:>8}".format(k + ":", item)
+ log_str += "\n" if i % 4 == 0 else "\t"
+ self.console_logger.info(log_str)
+ # Reset stats to avoid accumulating logs in memory
+ self.stats = defaultdict(lambda: [])
+
+
+# set up a custom logger
+def get_logger():
+ logger = logging.getLogger()
+ logger.handlers = []
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel('DEBUG')
+
+ return logger
+
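A minimal usage sketch of the Logger snapshotted above (not part of the repository's sources). It shows why the Sacred info.json files in these results hold paired "<key>" / "<key>_T" arrays: log_stat appends the value under the key and the environment timestep under "<key>_T". The two numeric values mirror the first entries of test_return_std above; everything else is illustrative.

# Minimal sketch, assuming it is run from src/ so that utils.logging resolves.
from utils.logging import Logger, get_logger

logger = Logger(get_logger())
# logger.setup_sacred(_run)  # with Sacred enabled, the same pairs land in run.info (info.json)

logger.log_stat("episode", 4, t=204)                           # needed by print_recent_stats
logger.log_stat("test_return_std", 2.7887253496087157, t=204)  # -> info["test_return_std"], info["test_return_std_T"]
logger.log_stat("test_return_std", 4.548982550716969, t=10383)
logger.print_recent_stats()  # averages the last few logged values per key, then clears the buffer
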
diff --git a/results/sacred/10gen_protoss/qmix/_sources/main_654daaa6534bcee62784d639ea63e51d.py b/results/sacred/10gen_protoss/qmix/_sources/main_654daaa6534bcee62784d639ea63e51d.py
new file mode 100644
index 0000000..199f3a0
--- /dev/null
+++ b/results/sacred/10gen_protoss/qmix/_sources/main_654daaa6534bcee62784d639ea63e51d.py
@@ -0,0 +1,124 @@
+import random
+
+import numpy as np
+import os
+import collections
+from os.path import dirname, abspath, join
+from copy import deepcopy
+from sacred import Experiment, SETTINGS
+from sacred.observers import FileStorageObserver
+from sacred.utils import apply_backspaces_and_linefeeds
+import sys
+import torch as th
+from utils.logging import get_logger
+import yaml
+import collections.abc
+
+from run import REGISTRY as run_REGISTRY
+
+SETTINGS['CAPTURE_MODE'] = "no"  # "fd" lets Sacred capture stdout/stderr; "no" leaves them visible in the console
+logger = get_logger()
+
+ex = Experiment("pymarl")
+ex.logger = logger
+ex.captured_out_filter = apply_backspaces_and_linefeeds
+
+results_path = join(dirname(dirname(abspath(__file__))))
+
+
+@ex.main
+def my_main(_run, _config, _log):
+ # Setting the random seed throughout the modules
+ config = config_copy(_config)
+ random.seed(config["seed"])
+ np.random.seed(config["seed"])
+ th.manual_seed(config["seed"])
+ th.cuda.manual_seed(config["seed"])
+ # th.cuda.manual_seed_all(config["seed"])
+ th.backends.cudnn.deterministic = True # cudnn
+
+
+ config['env_args']['seed'] = config["seed"]
+
+ # run
+ run_REGISTRY[_config['run']](_run, config, _log)
+
+
+def _get_config(params, arg_name, subfolder):
+ config_name = None
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0] == arg_name:
+ config_name = _v.split("=")[1]
+ del params[_i]
+ break
+
+ if config_name is not None:
+ with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)),
+ "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "{}.yaml error: {}".format(config_name, exc)
+ return config_dict
+
+
+def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, collections.abc.Mapping):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+
+def config_copy(config):
+ if isinstance(config, dict):
+ return {k: config_copy(v) for k, v in config.items()}
+ elif isinstance(config, list):
+ return [config_copy(v) for v in config]
+ else:
+ return deepcopy(config)
+
+
+def parse_command(params, key, default):
+ result = default
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0].strip() == key:
+ result = _v[_v.index('=') + 1:].strip()
+ break
+ return result
+
+
+if __name__ == '__main__':
+ params = deepcopy(sys.argv)
+
+ # Get the defaults from default.yaml
+ with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "default.yaml error: {}".format(exc)
+
+ # Load algorithm and env base configs
+ env_config = _get_config(params, "--env-config", "envs")
+ alg_config = _get_config(params, "--config", "algs")
+ # config_dict = {**config_dict, **env_config, **alg_config}
+ config_dict = recursive_dict_update(config_dict, env_config)
+ config_dict = recursive_dict_update(config_dict, alg_config)
+
+ # now add all the config to sacred
+ ex.add_config(config_dict)
+
+ # Save to disk by default for sacred
+ map_name = parse_command(params, "env_args.map_name", config_dict['env_args']['map_name'])
+ algo_name = parse_command(params, "name", config_dict['name'])
+ local_results_path = parse_command(params, "local_results_path", config_dict['local_results_path'])
+ file_obs_path = join(results_path, local_results_path, "sacred", map_name, algo_name)
+
+ logger.info("Saving to FileStorageObserver in {}.".format(file_obs_path))
+ ex.observers.append(FileStorageObserver.create(file_obs_path))
+
+ ex.run_commandline(params)
+
+ # flush
+ sys.stdout.flush()
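A hedged example of how this entry point is invoked, reconstructed from the _get_config / parse_command logic above and from the UPDATE arrays recorded in the run.json files in these results. The YAML names "feudal" and "sc2" are assumptions inferred from the name and env fields in the configs, not confirmed by this diff.

# Hypothetical invocation (run from src/, matching base_dir in the run.json files),
# mirroring the UPDATE list of the 5m_vs_6m feudal runs below:
#
#   python main.py --config=feudal --env-config=sc2 with env_args.map_name=5m_vs_6m \
#       obs_agent_id=True obs_last_action=True runner=parallel batch_size_run=4 \
#       buffer_size=5000 t_max=100000 epsilon_anneal_time=100000 batch_size=128 td_lambda=0.6
#
# "--config" and "--env-config" are popped by _get_config and resolved to
# config/algs/<name>.yaml and config/envs/<name>.yaml, merged over default.yaml via
# recursive_dict_update; everything after "with" is handed to Sacred as config
# updates, which is exactly what meta.config_updates / UPDATE record.
params = ["main.py", "--config=feudal", "with", "env_args.map_name=5m_vs_6m"]
assert params[1].split("=")[0] == "--config"      # the token _get_config matches and removes
assert params[3].split("=", 1)[1] == "5m_vs_6m"   # the value parse_command would return
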
diff --git a/results/sacred/5m_vs_6m/feudal/1/config.json b/results/sacred/5m_vs_6m/feudal/1/config.json
new file mode 100644
index 0000000..6d7644d
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/1/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 707858439,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
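A short worked example of the merge that produces fully resolved config files like the one above, using recursive_dict_update copied from the main.py snapshot earlier in this diff. It shows why the top-level obs_last_action flag stays separate from env_args.obs_last_action: nested mappings are merged key by key, never flattened. The concrete dictionaries are illustrative only.

import collections.abc

def recursive_dict_update(d, u):
    # Verbatim from the main.py snapshot above: merge u into d, recursing into mappings.
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = recursive_dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

defaults = {"obs_last_action": True, "env_args": {"obs_last_action": False, "map_name": "3m"}}
env_overrides = {"env_args": {"map_name": "5m_vs_6m"}, "t_max": 100000}
merged = recursive_dict_update(defaults, env_overrides)
assert merged["env_args"] == {"obs_last_action": False, "map_name": "5m_vs_6m"}
assert merged["obs_last_action"] is True and merged["t_max"] == 100000
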
diff --git a/results/sacred/5m_vs_6m/feudal/1/cout.txt b/results/sacred/5m_vs_6m/feudal/1/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/1/metrics.json b/results/sacred/5m_vs_6m/feudal/1/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/1/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/1/run.json b/results/sacred/5m_vs_6m/feudal/1/run.json
new file mode 100644
index 0000000..ee97619
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/1/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 209, in run_sequential\n learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 45, in __init__\n self.critic = FeudalCritic(args.state_dim, args)\n",
+ "NameError: name 'FeudalCritic' is not defined\n"
+ ],
+ "heartbeat": "2024-12-29T18:43:24.691551",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:43:20.056773",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:43:24.693562"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/10/config.json b/results/sacred/5m_vs_6m/feudal/10/config.json
new file mode 100644
index 0000000..4623928
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/10/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 507787181,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/10/cout.txt b/results/sacred/5m_vs_6m/feudal/10/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/10/metrics.json b/results/sacred/5m_vs_6m/feudal/10/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/10/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/10/run.json b/results/sacred/5m_vs_6m/feudal/10/run.json
new file mode 100644
index 0000000..b4eac7c
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/10/run.json
@@ -0,0 +1,131 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 120, in run\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 30, in select_actions\n agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 46, in forward\n worker_agent_outs, (new_worker_hidden, new_worker_cell), self.single_past_goals, self.batch_past_goals = self.worker_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 94, in forward\n hidden_state = hidden_state.to(device)\n",
+ "AttributeError: 'list' object has no attribute 'to'\n"
+ ],
+ "heartbeat": "2024-12-29T18:59:54.567302",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:59:31.491231",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:59:54.569300"
+}
\ No newline at end of file
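The failure recorded above happens because FeUdal_agent.forward receives its recurrent state as a plain Python list rather than a tensor (or an (h, c) pair of tensors), so .to(device) is not available. A defensive conversion along these lines is one way to cope; the helper name and shapes are assumptions, not code from this repository.

import torch as th

def hidden_to_device(hidden_state, device):
    # Move each element if the state arrives as a list/tuple of per-env tensors,
    # otherwise move the single tensor directly.
    if isinstance(hidden_state, (list, tuple)):
        return tuple(h.to(device) for h in hidden_state)
    return hidden_state.to(device)

h, c = hidden_to_device([th.zeros(4, 64), th.zeros(4, 64)], "cpu")  # 4 parallel envs, rnn_hidden_dim=64
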
diff --git a/results/sacred/5m_vs_6m/feudal/11/config.json b/results/sacred/5m_vs_6m/feudal/11/config.json
new file mode 100644
index 0000000..928bf1a
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/11/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 181533407,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/11/cout.txt b/results/sacred/5m_vs_6m/feudal/11/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/11/metrics.json b/results/sacred/5m_vs_6m/feudal/11/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/11/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/11/run.json b/results/sacred/5m_vs_6m/feudal/11/run.json
new file mode 100644
index 0000000..a98c1d0
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/11/run.json
@@ -0,0 +1,131 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 120, in run\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 30, in select_actions\n agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 46, in forward\n worker_agent_outs, (new_worker_hidden, new_worker_cell), self.single_past_goals, self.batch_past_goals = self.worker_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 106, in forward\n h_in, c_in = hidden_state\n",
+ "ValueError: not enough values to unpack (expected 2, got 0)\n"
+ ],
+ "heartbeat": "2024-12-29T19:01:01.107233",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:00:37.074715",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:01:01.109242"
+}
\ No newline at end of file
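In the failure above the worker's recurrent state arrives empty while forward() expects to unpack an (h, c) LSTM pair. An init_hidden along these lines is the usual shape of the fix; the function name and dimensions are assumptions (rnn_hidden_dim=64 comes from the config files in this directory), not code from this repository.

import torch as th

def init_hidden(batch_size, n_agents, hidden_dim=64):
    # One zeroed hidden and cell state per (env, agent) pair, so "h_in, c_in = hidden_state" unpacks cleanly.
    h = th.zeros(batch_size * n_agents, hidden_dim)
    c = th.zeros(batch_size * n_agents, hidden_dim)
    return h, c

h_in, c_in = init_hidden(batch_size=4, n_agents=5)  # 4 parallel envs, 5 agents in 5m_vs_6m
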
diff --git a/results/sacred/5m_vs_6m/feudal/12/config.json b/results/sacred/5m_vs_6m/feudal/12/config.json
new file mode 100644
index 0000000..882634f
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/12/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 437097629,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/12/cout.txt b/results/sacred/5m_vs_6m/feudal/12/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/12/info.json b/results/sacred/5m_vs_6m/feudal/12/info.json
new file mode 100644
index 0000000..424bbe8
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/12/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 150
+ ],
+ "dead_allies_mean": [
+ 4.75
+ ],
+ "dead_allies_mean_T": [
+ 150
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 150
+ ],
+ "ep_length_mean": [
+ 37.5
+ ],
+ "ep_length_mean_T": [
+ 150
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 150
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5849056603773584
+ }
+ ],
+ "return_max_T": [
+ 150
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.2452830188679245
+ }
+ ],
+ "return_mean_T": [
+ 150
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9056603773584906
+ }
+ ],
+ "return_min_T": [
+ 150
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.25313977103771196
+ }
+ ],
+ "return_std_T": [
+ 150
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 150
+ ],
+ "test_dead_allies_mean": [
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 150
+ ],
+ "test_dead_enemies_mean": [
+ 0.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 150
+ ],
+ "test_ep_length_mean": [
+ 20.78125
+ ],
+ "test_ep_length_mean_T": [
+ 150
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_max_T": [
+ 150
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_mean_T": [
+ 150
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 150
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_std_T": [
+ 150
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/12/metrics.json b/results/sacred/5m_vs_6m/feudal/12/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/12/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/12/run.json b/results/sacred/5m_vs_6m/feudal/12/run.json
new file mode 100644
index 0000000..863e000
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/12/run.json
@@ -0,0 +1,132 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 60, in train\n agent_outs, goals, values = self.mac.forward(batch, t=t)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 39, in forward\n manager_goal, manager_value, (new_manager_hidden, new_manager_cell) = self.manager_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 29, in forward\n x = F.relu(self.manager_fc1(inputs))\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\linear.py\", line 114, in forward\n return F.linear(input, self.weight, self.bias)\n",
+ "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_addmm)\n"
+ ],
+ "heartbeat": "2024-12-29T19:02:57.737769",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:02:24.077301",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:02:57.740778"
+}
\ No newline at end of file
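The fail_trace recorded in run 12 above shows the FeUdal manager network raising "Expected all tensors to be on the same device" inside FeUdal_agent.forward: the batch handed to self.manager_fc1 is still on the CPU while that layer's weights sit on cuda:0. A minimal sketch of the usual remedy follows; only FeUdal_agent and manager_fc1 come from the trace, while the helper name and the idea of moving the hidden/cell tensors alongside the inputs are assumptions.

    import torch

    def to_module_device(module: torch.nn.Module, *tensors: torch.Tensor):
        # Move every tensor onto whatever device the module's parameters live on,
        # so a CPU batch never meets cuda:0 weights inside F.linear.
        device = next(module.parameters()).device
        return tuple(t.to(device) for t in tensors)

    # Hypothetical use at the top of FeUdal_agent.forward:
    #   inputs, h, c = to_module_device(self.manager_fc1, inputs, h, c)
    #   x = F.relu(self.manager_fc1(inputs))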
diff --git a/results/sacred/5m_vs_6m/feudal/13/config.json b/results/sacred/5m_vs_6m/feudal/13/config.json
new file mode 100644
index 0000000..19628e1
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/13/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 371392846,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/13/cout.txt b/results/sacred/5m_vs_6m/feudal/13/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/13/metrics.json b/results/sacred/5m_vs_6m/feudal/13/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/13/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/13/run.json b/results/sacred/5m_vs_6m/feudal/13/run.json
new file mode 100644
index 0000000..22c55a0
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/13/run.json
@@ -0,0 +1,118 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T19:10:51.714337",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:10:51.629933",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T19:10:51.716375"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/14/config.json b/results/sacred/5m_vs_6m/feudal/14/config.json
new file mode 100644
index 0000000..e9a7a29
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/14/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 32,
+ "batch_size_run": 1,
+ "buffer_cpu_only": true,
+ "buffer_size": 32,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 50000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "episode",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 466041091,
+ "state_dim": 80,
+ "t_max": 2050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/14/cout.txt b/results/sacred/5m_vs_6m/feudal/14/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/14/metrics.json b/results/sacred/5m_vs_6m/feudal/14/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/14/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/14/run.json b/results/sacred/5m_vs_6m/feudal/14/run.json
new file mode 100644
index 0000000..a4ad6bb
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/14/run.json
@@ -0,0 +1,114 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 161, in run_sequential\n env_info = runner.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\episode_runner.py\", line 47, in get_env_info\n return self.env.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\envs\\smac_v1\\StarCraft2EnvWrapper.py\", line 61, in get_env_info\n print(env_info)\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T19:11:26.734449",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:11:26.237023",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:11:26.736456"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/15/config.json b/results/sacred/5m_vs_6m/feudal/15/config.json
new file mode 100644
index 0000000..eed5543
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/15/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 32,
+ "batch_size_run": 1,
+ "buffer_cpu_only": true,
+ "buffer_size": 32,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 50000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "episode",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 682299016,
+ "state_dim": 80,
+ "t_max": 2050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/15/cout.txt b/results/sacred/5m_vs_6m/feudal/15/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/15/metrics.json b/results/sacred/5m_vs_6m/feudal/15/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/15/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/15/run.json b/results/sacred/5m_vs_6m/feudal/15/run.json
new file mode 100644
index 0000000..87af068
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/15/run.json
@@ -0,0 +1,114 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 161, in run_sequential\n env_info = runner.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\episode_runner.py\", line 47, in get_env_info\n return self.env.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\envs\\smac_v1\\StarCraft2EnvWrapper.py\", line 42, in get_env_info\n print(\"\u958b\u59cb\u7372\u53d6\u74b0\u5883\u4fe1\u606f...\") # \u6dfb\u52a0\u8abf\u8a66\u4fe1\u606f\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T19:12:44.471688",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:12:43.954086",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:12:44.473189"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/16/config.json b/results/sacred/5m_vs_6m/feudal/16/config.json
new file mode 100644
index 0000000..f4d8227
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/16/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 32,
+ "batch_size_run": 1,
+ "buffer_cpu_only": true,
+ "buffer_size": 32,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 50000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "episode",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 762805705,
+ "state_dim": 80,
+ "t_max": 2050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/16/cout.txt b/results/sacred/5m_vs_6m/feudal/16/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/16/metrics.json b/results/sacred/5m_vs_6m/feudal/16/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/16/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/16/run.json b/results/sacred/5m_vs_6m/feudal/16/run.json
new file mode 100644
index 0000000..1663f7f
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/16/run.json
@@ -0,0 +1,114 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 161, in run_sequential\n env_info = runner.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\episode_runner.py\", line 47, in get_env_info\n return self.env.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\envs\\smac_v1\\StarCraft2EnvWrapper.py\", line 42, in get_env_info\n print(\"Starting to get environment info...\") # \u6539\u7528\u82f1\u6587\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T19:14:25.404494",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:14:24.916934",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:14:25.405490"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/17/config.json b/results/sacred/5m_vs_6m/feudal/17/config.json
new file mode 100644
index 0000000..0d9de35
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/17/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 32,
+ "batch_size_run": 1,
+ "buffer_cpu_only": true,
+ "buffer_size": 32,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 50000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "episode",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 106831448,
+ "state_dim": 80,
+ "t_max": 2050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/17/cout.txt b/results/sacred/5m_vs_6m/feudal/17/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/17/metrics.json b/results/sacred/5m_vs_6m/feudal/17/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/17/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/17/run.json b/results/sacred/5m_vs_6m/feudal/17/run.json
new file mode 100644
index 0000000..440f00b
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/17/run.json
@@ -0,0 +1,114 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 161, in run_sequential\n env_info = runner.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\episode_runner.py\", line 47, in get_env_info\n return self.env.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\envs\\smac_v1\\StarCraft2EnvWrapper.py\", line 42, in get_env_info\n print(\"Starting to get environment info...\")\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T19:18:05.729217",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:18:05.247932",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:18:05.731222"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/18/config.json b/results/sacred/5m_vs_6m/feudal/18/config.json
new file mode 100644
index 0000000..76575ac
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/18/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 32,
+ "batch_size_run": 1,
+ "buffer_cpu_only": true,
+ "buffer_size": 32,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 50000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "episode",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 883123739,
+ "state_dim": 80,
+ "t_max": 2050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/18/cout.txt b/results/sacred/5m_vs_6m/feudal/18/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/18/metrics.json b/results/sacred/5m_vs_6m/feudal/18/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/18/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/18/run.json b/results/sacred/5m_vs_6m/feudal/18/run.json
new file mode 100644
index 0000000..5a1cfec
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/18/run.json
@@ -0,0 +1,114 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 161, in run_sequential\n env_info = runner.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\episode_runner.py\", line 47, in get_env_info\n return self.env.get_env_info()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\envs\\smac_v1\\StarCraft2EnvWrapper.py\", line 42, in get_env_info\n print(\"Starting to get environment info...\") # \u6539\u7528\u82f1\u6587\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T19:19:47.567781",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:19:47.096681",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T19:19:47.569280"
+}
\ No newline at end of file
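Runs 14 through 18 above all stop in the same place: a bare print() inside StarCraft2EnvWrapper.get_env_info raises OSError [WinError 1] on Windows (the escaped message decodes to "功能錯誤", roughly "Incorrect function"), and switching the printed text from Chinese to English did not help, which suggests the console handle rather than the text encoding is at fault. A hedged workaround sketch follows; safe_print is a hypothetical helper, not part of pymarl3.

    import logging

    log = logging.getLogger("pymarl")

    def safe_print(msg: str) -> None:
        # Try the console first, then fall back to the logging machinery when the
        # stdout handle is unusable (the OSError [WinError 1] seen in runs 14-18).
        try:
            print(msg, flush=True)
        except OSError:
            log.info(msg)

    # Hypothetical use inside StarCraft2EnvWrapper.get_env_info:
    #   safe_print("Starting to get environment info...")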
diff --git a/results/sacred/5m_vs_6m/feudal/2/config.json b/results/sacred/5m_vs_6m/feudal/2/config.json
new file mode 100644
index 0000000..49e0dbc
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/2/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 499145602,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/2/cout.txt b/results/sacred/5m_vs_6m/feudal/2/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/2/metrics.json b/results/sacred/5m_vs_6m/feudal/2/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/2/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/2/run.json b/results/sacred/5m_vs_6m/feudal/2/run.json
new file mode 100644
index 0000000..6022f05
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/2/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 209, in run_sequential\n learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 49, in __init__\n self.manager_params += list(self.critic.parameters())\n",
+ "AttributeError: 'FeudalLearner' object has no attribute 'critic'\n"
+ ],
+ "heartbeat": "2024-12-29T18:43:55.150259",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:43:50.734338",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:43:55.151759"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/3/config.json b/results/sacred/5m_vs_6m/feudal/3/config.json
new file mode 100644
index 0000000..22f3c27
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/3/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 517178064,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/3/cout.txt b/results/sacred/5m_vs_6m/feudal/3/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/3/metrics.json b/results/sacred/5m_vs_6m/feudal/3/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/3/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/3/run.json b/results/sacred/5m_vs_6m/feudal/3/run.json
new file mode 100644
index 0000000..a6f3a4a
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/3/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 209, in run_sequential\n learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 52, in __init__\n self.critic_optimiser = RMSprop(params=self.critic.parameters(),\n",
+ "AttributeError: 'FeudalLearner' object has no attribute 'critic'\n"
+ ],
+ "heartbeat": "2024-12-29T18:44:14.125164",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:44:09.705383",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:44:14.133193"
+}
\ No newline at end of file
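Runs 2 and 3 above record the same construction-order bug in FeUdal_learner.py: __init__ reads self.critic.parameters() (line 49) and builds the critic's RMSprop optimiser (line 52) before any self.critic attribute has been assigned. A minimal sketch of the intended ordering is below; the placeholder critic and the FeudalLearnerSketch name are assumptions, since the real critic module is not part of this diff, while the args fields mirror the config.json entries above.

    import torch.nn as nn
    from torch.optim import RMSprop

    class _PlaceholderCritic(nn.Module):
        # Stand-in for the project's real critic, present only so the sketch runs.
        def __init__(self, state_dim: int, hidden_dim: int):
            super().__init__()
            self.net = nn.Sequential(nn.Linear(state_dim, hidden_dim),
                                     nn.ReLU(),
                                     nn.Linear(hidden_dim, 1))

        def forward(self, state):
            return self.net(state)

    class FeudalLearnerSketch:
        def __init__(self, manager_params, args):
            # Build the critic before extending parameter lists or creating its
            # optimiser; the failed runs did both steps with no critic assigned.
            self.critic = _PlaceholderCritic(args.state_dim, args.critic_hidden_dim)
            self.manager_params = list(manager_params) + list(self.critic.parameters())
            self.critic_optimiser = RMSprop(params=self.critic.parameters(),
                                            lr=args.critic_lr,
                                            alpha=args.optim_alpha,
                                            eps=args.optim_eps)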
diff --git a/results/sacred/5m_vs_6m/feudal/4/config.json b/results/sacred/5m_vs_6m/feudal/4/config.json
new file mode 100644
index 0000000..0bf0bcb
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/4/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 872351197,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/4/cout.txt b/results/sacred/5m_vs_6m/feudal/4/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/4/metrics.json b/results/sacred/5m_vs_6m/feudal/4/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/4/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/4/run.json b/results/sacred/5m_vs_6m/feudal/4/run.json
new file mode 100644
index 0000000..edbaf43
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/4/run.json
@@ -0,0 +1,128 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 102, in run\n self.reset()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 77, in reset\n if (self.args.use_cuda and self.args.cpu_inference) and str(self.mac.get_device()) != \"cpu\":\n",
+ "AttributeError: 'FeUdalMAC' object has no attribute 'get_device'\n"
+ ],
+ "heartbeat": "2024-12-29T18:46:09.443531",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:46:05.004412",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:46:09.445530"
+}
\ No newline at end of file
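Note on the fail_trace above: parallel_runner.reset() expects every controller to expose a get_device() method when use_cuda and cpu_inference are both set, and FeUdalMAC does not have one. A minimal, hypothetical sketch of such a method (assuming the controller wraps two nn.Module sub-agents named manager_agent and worker_agent, as the later tracebacks suggest; this is not the repository's code):

    import torch.nn as nn

    class FeUdalMACSketch:
        def __init__(self, input_dim=10, hidden_dim=64):
            # Stand-ins for the real manager/worker agents.
            self.manager_agent = nn.Linear(input_dim, hidden_dim)
            self.worker_agent = nn.Linear(input_dim, hidden_dim)

        def get_device(self):
            # Device of the first manager parameter; assumes both sub-agents
            # are kept on the same device.
            return next(self.manager_agent.parameters()).device

    mac = FeUdalMACSketch()
    print(str(mac.get_device()))  # "cpu" until the sub-agents are moved to a GPU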
diff --git a/results/sacred/5m_vs_6m/feudal/5/config.json b/results/sacred/5m_vs_6m/feudal/5/config.json
new file mode 100644
index 0000000..9ef4710
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/5/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 596105235,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/5/cout.txt b/results/sacred/5m_vs_6m/feudal/5/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/5/metrics.json b/results/sacred/5m_vs_6m/feudal/5/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/5/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/5/run.json b/results/sacred/5m_vs_6m/feudal/5/run.json
new file mode 100644
index 0000000..8d69791
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/5/run.json
@@ -0,0 +1,130 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 102, in run\n self.reset()\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 77, in reset\n if (self.args.use_cuda and self.args.cpu_inference) and str(self.mac.get_device()) != \"cpu\":\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 90, in get_device\n return next(self.parameters()).device\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 69, in parameters\n return self.manager_agent.parameters() + self.worker_agent.parameters()\n",
+ "TypeError: unsupported operand type(s) for +: 'generator' and 'generator'\n"
+ ],
+ "heartbeat": "2024-12-29T18:48:15.264543",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:48:10.740109",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:48:15.266044"
+}
\ No newline at end of file
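The fail_trace above pinpoints the follow-up error: FeUdal_controller.parameters() tries to add two generators, but nn.Module.parameters() yields a generator, which does not support "+". A small self-contained sketch of the usual workaround, using toy modules in place of the repository's agents:

    import itertools
    import torch.nn as nn

    manager_agent = nn.Linear(8, 4)
    worker_agent = nn.Linear(8, 4)

    # This is the line the traceback shows failing:
    # params = manager_agent.parameters() + worker_agent.parameters()  # TypeError

    # Either of these yields something an optimizer or next() can consume:
    params_iter = itertools.chain(manager_agent.parameters(), worker_agent.parameters())
    params_list = list(manager_agent.parameters()) + list(worker_agent.parameters())

    print(sum(p.numel() for p in params_list))  # 72 parameters in this toy example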
diff --git a/results/sacred/5m_vs_6m/feudal/6/config.json b/results/sacred/5m_vs_6m/feudal/6/config.json
new file mode 100644
index 0000000..107d563
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/6/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 204707139,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/6/cout.txt b/results/sacred/5m_vs_6m/feudal/6/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/6/info.json b/results/sacred/5m_vs_6m/feudal/6/info.json
new file mode 100644
index 0000000..de3c993
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/6/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 97
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 97
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 97
+ ],
+ "ep_length_mean": [
+ 24.25
+ ],
+ "ep_length_mean_T": [
+ 97
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 97
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2641509433962264
+ }
+ ],
+ "return_max_T": [
+ 97
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7547169811320753
+ }
+ ],
+ "return_mean_T": [
+ 97
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3584905660377358
+ }
+ ],
+ "return_min_T": [
+ 97
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.33487244055280846
+ }
+ ],
+ "return_std_T": [
+ 97
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 97
+ ],
+ "test_dead_allies_mean": [
+ 0.0
+ ],
+ "test_dead_allies_mean_T": [
+ 97
+ ],
+ "test_dead_enemies_mean": [
+ 0.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 97
+ ],
+ "test_ep_length_mean": [
+ 70.0
+ ],
+ "test_ep_length_mean_T": [
+ 97
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_max_T": [
+ 97
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_mean_T": [
+ 97
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 97
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_std_T": [
+ 97
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/6/metrics.json b/results/sacred/5m_vs_6m/feudal/6/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/6/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/6/run.json b/results/sacred/5m_vs_6m/feudal/6/run.json
new file mode 100644
index 0000000..2422191
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/6/run.json
@@ -0,0 +1,132 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\FeUdal_learner.py\", line 60, in train\n agent_outs, goals, values = self.mac.forward(batch, t=t)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 39, in forward\n manager_goal, manager_value, (new_manager_hidden, new_manager_cell) = self.manager_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 29, in forward\n x = F.relu(self.manager_fc1(inputs))\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\linear.py\", line 114, in forward\n return F.linear(input, self.weight, self.bias)\n",
+ "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_addmm)\n"
+ ],
+ "heartbeat": "2024-12-29T18:49:53.258783",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:49:13.819758",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:49:53.262287"
+}
\ No newline at end of file
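This run gets as far as learner.train() and then fails with a CPU/CUDA mismatch inside the manager's first linear layer, which typically means a batch assembled on the CPU side (cpu_inference is true) reached a module living on cuda:0. A hedged sketch of the usual guard, not taken from the repository:

    import torch
    import torch.nn as nn

    manager_fc1 = nn.Linear(16, 64)
    if torch.cuda.is_available():
        manager_fc1 = manager_fc1.cuda()

    inputs = torch.randn(4, 16)  # e.g. a batch built on the CPU side
    device = next(manager_fc1.parameters()).device
    out = manager_fc1(inputs.to(device))  # aligning devices avoids the RuntimeError above
    print(out.shape)  # torch.Size([4, 64])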
diff --git a/results/sacred/5m_vs_6m/feudal/7/config.json b/results/sacred/5m_vs_6m/feudal/7/config.json
new file mode 100644
index 0000000..1d0ddb3
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/7/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 349608658,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/7/cout.txt b/results/sacred/5m_vs_6m/feudal/7/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/7/metrics.json b/results/sacred/5m_vs_6m/feudal/7/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/7/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/7/run.json b/results/sacred/5m_vs_6m/feudal/7/run.json
new file mode 100644
index 0000000..ed25dd2
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/7/run.json
@@ -0,0 +1,131 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 120, in run\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 30, in select_actions\n agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 39, in forward\n manager_goal, manager_value, (new_manager_hidden, new_manager_cell) = self.manager_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 33, in forward\n hidden = hidden.to(device)\n",
+ "AttributeError: 'tuple' object has no attribute 'to'\n"
+ ],
+ "heartbeat": "2024-12-29T18:51:18.974675",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:50:58.905345",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:51:18.976696"
+}
\ No newline at end of file
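The failure above is in FeUdal_agent.forward: the manager's recurrent state is handled as an (h, c) pair (the caller unpacks new_manager_hidden and new_manager_cell), and a tuple has no .to() method. A short sketch of moving such a state element-wise, assuming an LSTM-style pair of tensors:

    import torch

    h = torch.zeros(1, 4, 64)
    c = torch.zeros(1, 4, 64)
    hidden = (h, c)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # hidden.to(device) raises AttributeError: 'tuple' object has no attribute 'to'
    hidden = tuple(t.to(device) for t in hidden)
    print(hidden[0].device, hidden[1].device)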
diff --git a/results/sacred/5m_vs_6m/feudal/8/config.json b/results/sacred/5m_vs_6m/feudal/8/config.json
new file mode 100644
index 0000000..3923a6f
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/8/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 748911858,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/8/cout.txt b/results/sacred/5m_vs_6m/feudal/8/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/8/metrics.json b/results/sacred/5m_vs_6m/feudal/8/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/8/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/8/run.json b/results/sacred/5m_vs_6m/feudal/8/run.json
new file mode 100644
index 0000000..9362c85
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/8/run.json
@@ -0,0 +1,130 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 120, in run\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 30, in select_actions\n agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 46, in forward\n worker_agent_outs, (new_worker_hidden, new_worker_cell), self.single_past_goals, self.batch_past_goals = self.worker_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ "TypeError: Feudal_WorkerAgent.forward() takes from 3 to 5 positional arguments but 6 were given\n"
+ ],
+ "heartbeat": "2024-12-29T18:52:36.267587",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:52:12.335022",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:52:36.268585"
+}
\ No newline at end of file
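Here the controller calls the worker agent with five positional values (six counting self), while Feudal_WorkerAgent.forward accepts at most five including self. A generic illustration of that arity mismatch and of aligning the signature with the call site; the parameter names below are placeholders, since the real signature is not shown in this diff:

    class WorkerBefore:
        def forward(self, inputs, hidden, goal=None, test_mode=False):
            return inputs

    class WorkerAfter:
        # One extra parameter matching whatever the controller passes in.
        def forward(self, inputs, hidden, goal=None, test_mode=False, past_goals=None):
            return inputs

    # WorkerBefore().forward(1, 2, 3, 4, 5)
    #   -> TypeError: forward() takes from 3 to 5 positional arguments but 6 were given
    print(WorkerAfter().forward(1, 2, 3, 4, 5))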
diff --git a/results/sacred/5m_vs_6m/feudal/9/config.json b/results/sacred/5m_vs_6m/feudal/9/config.json
new file mode 100644
index 0000000..b08c90d
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/9/config.json
@@ -0,0 +1,97 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "feudal",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "c": 1,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_hidden_dim": 64,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "embedding_dim": 16,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gamma": 0.99,
+ "goal_dim": 16,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "hypernet_layers": 2,
+ "intrinsic_rewards_alpha": 0.001,
+ "label": "default_label",
+ "learner": "feudal_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.0005,
+ "mac": "feudal_mac",
+ "manager_hidden_dim": 64,
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "feudal",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "repeat_id": 1,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 859599929,
+ "state_dim": 80,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_tensorboard": true,
+ "vf_coef": 0.5,
+ "worker_hidden_dim": 64
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/9/cout.txt b/results/sacred/5m_vs_6m/feudal/9/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/feudal/9/metrics.json b/results/sacred/5m_vs_6m/feudal/9/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/9/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/feudal/9/run.json b/results/sacred/5m_vs_6m/feudal/9/run.json
new file mode 100644
index 0000000..ecea6e3
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/9/run.json
@@ -0,0 +1,131 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 261, in run_sequential\n episode_batch = runner.run(test_mode=False)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 120, in run\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 30, in select_actions\n agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\FeUdal_controller.py\", line 46, in forward\n worker_agent_outs, (new_worker_hidden, new_worker_cell), self.single_past_goals, self.batch_past_goals = self.worker_agent(\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1194, in _call_impl\n return forward_call(*input, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\modules\\agents\\FeUdal_agent.py\", line 84, in forward\n goal = goal.to(device)\n",
+ "AttributeError: 'tuple' object has no attribute 'to'\n"
+ ],
+ "heartbeat": "2024-12-29T18:58:43.505662",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:58:22.158179",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:58:43.507170"
+}
\ No newline at end of file
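The last FeUdal failure mirrors run 7: a goal kept as a tuple (or list) of tensors reaches code that calls .to() on it directly. A sketch of normalising such a value before moving it; stacking along a new leading dimension is an assumption about the intended layout:

    import torch

    def to_device(goal, device):
        # If goals were collected as a tuple/list of tensors, stack them first.
        if isinstance(goal, (tuple, list)):
            goal = torch.stack(list(goal), dim=0)
        return goal.to(device)

    goals = (torch.randn(5, 16), torch.randn(5, 16))
    print(to_device(goals, torch.device("cpu")).shape)  # torch.Size([2, 5, 16])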
diff --git a/results/sacred/5m_vs_6m/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py b/results/sacred/5m_vs_6m/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
new file mode 100644
index 0000000..5393b7f
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+import logging
+import numpy as np
+import torch as th
+
+class Logger:
+ def __init__(self, console_logger):
+ self.console_logger = console_logger
+
+ self.use_tb = False
+ self.use_sacred = False
+ self.use_hdf = False
+
+ self.stats = defaultdict(lambda: [])
+
+ def setup_tb(self, directory_name):
+ # Import here so it doesn't have to be installed if you don't use it
+ from tensorboard_logger import configure, log_value
+ configure(directory_name)
+ self.tb_logger = log_value
+ self.use_tb = True
+
+ def setup_sacred(self, sacred_run_dict):
+ self.sacred_info = sacred_run_dict.info
+ self.use_sacred = True
+
+ def log_stat(self, key, value, t, to_sacred=True):
+ self.stats[key].append((t, value))
+
+ if self.use_tb:
+ self.tb_logger(key, value, t)
+
+ if self.use_sacred and to_sacred:
+ if key in self.sacred_info:
+ self.sacred_info["{}_T".format(key)].append(t)
+ self.sacred_info[key].append(value)
+ else:
+ self.sacred_info["{}_T".format(key)] = [t]
+ self.sacred_info[key] = [value]
+
+ def print_recent_stats(self):
+ log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
+ i = 0
+ for (k, v) in sorted(self.stats.items()):
+ if k == "episode":
+ continue
+ i += 1
+ window = 5 if k != "epsilon" else 1
+ item = "{:.4f}".format(th.mean(th.tensor([float(x[1]) for x in self.stats[k][-window:]])))
+ log_str += "{:<25}{:>8}".format(k + ":", item)
+ log_str += "\n" if i % 4 == 0 else "\t"
+ self.console_logger.info(log_str)
+ # Reset stats to avoid accumulating logs in memory
+ self.stats = defaultdict(lambda: [])
+
+
+# set up a custom logger
+def get_logger():
+ logger = logging.getLogger()
+ logger.handlers = []
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel('DEBUG')
+
+ return logger
+
diff --git a/results/sacred/5m_vs_6m/feudal/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py b/results/sacred/5m_vs_6m/feudal/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py
new file mode 100644
index 0000000..47de339
--- /dev/null
+++ b/results/sacred/5m_vs_6m/feudal/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py
@@ -0,0 +1,124 @@
+import random
+
+import numpy as np
+import os
+import collections
+from os.path import dirname, abspath, join
+from copy import deepcopy
+from sacred import Experiment, SETTINGS
+from sacred.observers import FileStorageObserver
+from sacred.utils import apply_backspaces_and_linefeeds
+import sys
+import torch as th
+from utils.logging import get_logger
+import yaml
+import collections.abc
+
+from run import REGISTRY as run_REGISTRY
+
+SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
+logger = get_logger()
+
+ex = Experiment("pymarl")
+ex.logger = logger
+ex.captured_out_filter = apply_backspaces_and_linefeeds
+
+results_path = join(dirname(dirname(abspath(__file__))))
+
+
+@ex.main
+def my_main(_run, _config, _log):
+ # Setting the random seed throughout the modules
+ config = config_copy(_config)
+ random.seed(config["seed"])
+ np.random.seed(config["seed"])
+ th.manual_seed(config["seed"])
+ th.cuda.manual_seed(config["seed"])
+ # th.cuda.manual_seed_all(config["seed"])
+ th.backends.cudnn.deterministic = True # cudnn
+
+
+ config['env_args']['seed'] = config["seed"]
+
+ # run
+ run_REGISTRY[_config['run']](_run, config, _log)
+
+
+def _get_config(params, arg_name, subfolder):
+ config_name = None
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0] == arg_name:
+ config_name = _v.split("=")[1]
+ del params[_i]
+ break
+
+ if config_name is not None:
+ with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)),
+ "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "{}.yaml error: {}".format(config_name, exc)
+ return config_dict
+
+
+def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, collections.abc.Mapping):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+
+def config_copy(config):
+ if isinstance(config, dict):
+ return {k: config_copy(v) for k, v in config.items()}
+ elif isinstance(config, list):
+ return [config_copy(v) for v in config]
+ else:
+ return deepcopy(config)
+
+
+def parse_command(params, key, default):
+ result = default
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0].strip() == key:
+ result = _v[_v.index('=') + 1:].strip()
+ break
+ return result
+
+
+if __name__ == '__main__':
+ params = deepcopy(sys.argv)
+
+ # Get the defaults from default.yaml
+ with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "default.yaml error: {}".format(exc)
+
+ # Load algorithm and env base configs
+ env_config = _get_config(params, "--env-config", "envs")
+ alg_config = _get_config(params, "--config", "algs")
+ # config_dict = {**config_dict, **env_config, **alg_config}
+ config_dict = recursive_dict_update(config_dict, env_config)
+ config_dict = recursive_dict_update(config_dict, alg_config)
+
+ # now add all the config to sacred
+ ex.add_config(config_dict)
+
+ # Save to disk by default for sacred
+ map_name = parse_command(params, "env_args.map_name", config_dict['env_args']['map_name'])
+ algo_name = parse_command(params, "name", config_dict['name'])
+ local_results_path = parse_command(params, "local_results_path", config_dict['local_results_path'])
+ file_obs_path = join(results_path, local_results_path, "sacred", map_name, algo_name)
+
+ logger.info("Saving to FileStorageObserver in {}.".format(file_obs_path))
+ ex.observers.append(FileStorageObserver.create(file_obs_path))
+
+ ex.run_commandline(params)
+
+ # flush
+ sys.stdout.flush()
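For reference, a standalone illustration of the config merge performed by the entry point above: default.yaml is loaded first and then deep-merged with the env and algorithm configs via recursive_dict_update, so nested keys such as env_args.map_name are overridden without discarding siblings. The dictionaries below are dummies, not the real YAML contents; the helper is copied from the source above so the snippet runs on its own:

    import collections.abc
    from copy import deepcopy

    def recursive_dict_update(d, u):  # same helper as defined in main.py above
        for k, v in u.items():
            if isinstance(v, collections.abc.Mapping):
                d[k] = recursive_dict_update(d.get(k, {}), v)
            else:
                d[k] = v
        return d

    default = {"t_max": 100000, "env_args": {"map_name": "3m", "difficulty": "7"}}
    env = {"env_args": {"map_name": "5m_vs_6m"}}
    alg = {"name": "qmix", "t_max": 10050000}

    merged = recursive_dict_update(recursive_dict_update(deepcopy(default), env), alg)
    print(merged)
    # {'t_max': 10050000, 'env_args': {'map_name': '5m_vs_6m', 'difficulty': '7'}, 'name': 'qmix'}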
diff --git a/results/sacred/5m_vs_6m/qmix/1/config.json b/results/sacred/5m_vs_6m/qmix/1/config.json
new file mode 100644
index 0000000..c0e38d9
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/1/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 437873842,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 12,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/1/cout.txt b/results/sacred/5m_vs_6m/qmix/1/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/1/metrics.json b/results/sacred/5m_vs_6m/qmix/1/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/1/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/1/run.json b/results/sacred/5m_vs_6m/qmix/1/run.json
new file mode 100644
index 0000000..1344538
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/1/run.json
@@ -0,0 +1,129 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 203, in run_sequential\n mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\n_controller.py\", line 13, in __init__\n super(NMAC, self).__init__(scheme, groups, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\basic_controller.py\", line 15, in __init__\n self._build_agents(self.input_shape)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\controllers\\basic_controller.py\", line 83, in _build_agents\n print(\"&&&&&&&&&&&&&&&&&&&&&&\", self.args.agent, get_parameters_num(self.parameters()))\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:06:39.507641",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:06:32.973993",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:06:39.509138"
+}
\ No newline at end of file
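The qmix/1 run fails differently: a plain print inside _build_agents raises OSError: [WinError 1] (the escaped message is Chinese for roughly "incorrect function"). This is commonly seen on Windows when Sacred's fd-level output capture is active, and main.py above hardcodes SETTINGS['CAPTURE_MODE'] = "fd". A hedged sketch of the usual workaround, not a change from the repository:

    import os
    from sacred import SETTINGS

    # "sys" captures via Python-level stdout/stderr and avoids the fd-level
    # redirection that tends to break print() on Windows consoles.
    SETTINGS['CAPTURE_MODE'] = "sys" if os.name == "nt" else "fd"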
diff --git a/results/sacred/5m_vs_6m/qmix/10/config.json b/results/sacred/5m_vs_6m/qmix/10/config.json
new file mode 100644
index 0000000..f2e4be9
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/10/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 304770793,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/10/cout.txt b/results/sacred/5m_vs_6m/qmix/10/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/10/metrics.json b/results/sacred/5m_vs_6m/qmix/10/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/10/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/10/run.json b/results/sacred/5m_vs_6m/qmix/10/run.json
new file mode 100644
index 0000000..22ce8ec
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/10/run.json
@@ -0,0 +1,118 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T19:16:35.377438",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:15:59.846215",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T19:16:35.379936"
+}
\ No newline at end of file
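
The meta.options.UPDATE list in each run.json records exactly the overrides passed after Sacred's `with` keyword, while config.json stores the fully resolved configuration. A minimal sketch for recovering a comparable launch command from one of these files; the `--config=qmix --env-config=sc2` prefix is assumed from the usual pymarl3 invocation and is not itself stored in run.json:

```python
# Sketch: rebuild an equivalent command line from a Sacred run.json.
# Only the "with" overrides are actually recorded in the file; the
# entry point and config flags below are assumptions.
import json

with open("results/sacred/5m_vs_6m/qmix/10/run.json", encoding="utf-8") as f:
    run = json.load(f)

overrides = run["meta"]["options"]["UPDATE"]
command = "python src/main.py --config=qmix --env-config=sc2 with " + " ".join(overrides)
print(command)
```
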
diff --git a/results/sacred/5m_vs_6m/qmix/11/config.json b/results/sacred/5m_vs_6m/qmix/11/config.json
new file mode 100644
index 0000000..e4e72ac
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/11/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 336442794,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/11/cout.txt b/results/sacred/5m_vs_6m/qmix/11/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/11/metrics.json b/results/sacred/5m_vs_6m/qmix/11/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/11/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/11/run.json b/results/sacred/5m_vs_6m/qmix/11/run.json
new file mode 100644
index 0000000..7d0308e
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/11/run.json
@@ -0,0 +1,104 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T19:17:57.520391",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "obs_agent_id": true,
+ "obs_last_action": true
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T19:17:02.366573",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T19:17:57.522898"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/2/config.json b/results/sacred/5m_vs_6m/qmix/2/config.json
new file mode 100644
index 0000000..9126a84
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/2/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 2441757,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 12,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/2/cout.txt b/results/sacred/5m_vs_6m/qmix/2/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/2/metrics.json b/results/sacred/5m_vs_6m/qmix/2/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/2/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/2/run.json b/results/sacred/5m_vs_6m/qmix/2/run.json
new file mode 100644
index 0000000..f372ceb
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/2/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 206, in run_sequential\n runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\runners\\parallel_runner.py\", line 55, in setup\n print(\" &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& self.batch_device={}\".format(\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:07:57.434397",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:07:51.341620",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:07:57.435895"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/3/config.json b/results/sacred/5m_vs_6m/qmix/3/config.json
new file mode 100644
index 0000000..a2f026d
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/3/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 610338225,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 12,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/3/cout.txt b/results/sacred/5m_vs_6m/qmix/3/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/3/metrics.json b/results/sacred/5m_vs_6m/qmix/3/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/3/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/3/run.json b/results/sacred/5m_vs_6m/qmix/3/run.json
new file mode 100644
index 0000000..155e9ff
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/3/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 209, in run_sequential\n learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\nq_learner.py\", line 75, in __init__\n print('Mixer Size: ')\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:08:50.856920",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:08:44.591867",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:08:50.858921"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/4/config.json b/results/sacred/5m_vs_6m/qmix/4/config.json
new file mode 100644
index 0000000..ebccf8c
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/4/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 133511293,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 12,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/4/cout.txt b/results/sacred/5m_vs_6m/qmix/4/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/4/info.json b/results/sacred/5m_vs_6m/qmix/4/info.json
new file mode 100644
index 0000000..b630a3b
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/4/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 211
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 211
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 211
+ ],
+ "ep_length_mean": [
+ 26.375
+ ],
+ "ep_length_mean_T": [
+ 211
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 211
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3584905660377358
+ }
+ ],
+ "return_max_T": [
+ 211
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.2169811320754718
+ }
+ ],
+ "return_mean_T": [
+ 211
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9056603773584906
+ }
+ ],
+ "return_min_T": [
+ 211
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.1575782366838685
+ }
+ ],
+ "return_std_T": [
+ 211
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 211
+ ],
+ "test_dead_allies_mean": [
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 211
+ ],
+ "test_dead_enemies_mean": [
+ 1.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 211
+ ],
+ "test_ep_length_mean": [
+ 18.09375
+ ],
+ "test_ep_length_mean_T": [
+ 211
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.5283018867924527
+ }
+ ],
+ "test_return_max_T": [
+ 211
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.1745283018867925
+ }
+ ],
+ "test_return_mean_T": [
+ 211
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0754716981132075
+ }
+ ],
+ "test_return_min_T": [
+ 211
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.13792633506805135
+ }
+ ],
+ "test_return_std_T": [
+ 211
+ ]
+}
\ No newline at end of file
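
Scalar metrics in these info.json files are written through jsonpickle, so native Python numbers appear directly while numpy.float64 values are wrapped as {"py/object": "numpy.float64", "value": ...} (the grad_norm entries further down are whole torch tensors serialized the same way). A minimal sketch, assuming you only need the scalar series and do not want to pull in jsonpickle, that flattens either representation:

```python
# Sketch: read a Sacred info.json and extract plain float series,
# unwrapping the {"py/object": "numpy.float64", "value": ...} entries.
import json


def as_float(entry):
    # jsonpickle stores numpy scalars as dicts carrying a "value" field;
    # native ints/floats are stored directly.
    if isinstance(entry, dict) and "value" in entry:
        return float(entry["value"])
    return float(entry)


with open("results/sacred/5m_vs_6m/qmix/4/info.json", encoding="utf-8") as f:
    info = json.load(f)

returns = [as_float(x) for x in info["return_mean"]]
steps = info["return_mean_T"]
print(list(zip(steps, returns)))  # e.g. [(211, 1.2169811320754718)]
```
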
diff --git a/results/sacred/5m_vs_6m/qmix/4/metrics.json b/results/sacred/5m_vs_6m/qmix/4/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/4/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/4/run.json b/results/sacred/5m_vs_6m/qmix/4/run.json
new file mode 100644
index 0000000..3e8d626
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/4/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\nq_learner.py\", line 183, in train\n print(\"Avg cost {} seconds\".format(self.avg_time))\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:12:22.629336",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:11:28.868612",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:12:22.633852"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/5/config.json b/results/sacred/5m_vs_6m/qmix/5/config.json
new file mode 100644
index 0000000..67621f7
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/5/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 985419632,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 12,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/5/cout.txt b/results/sacred/5m_vs_6m/qmix/5/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/5/info.json b/results/sacred/5m_vs_6m/qmix/5/info.json
new file mode 100644
index 0000000..20a4d19
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/5/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 188
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 188
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 188
+ ],
+ "ep_length_mean": [
+ 23.5
+ ],
+ "ep_length_mean_T": [
+ 188
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 188
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.490566037735849
+ }
+ ],
+ "return_max_T": [
+ 188
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.4716981132075473
+ }
+ ],
+ "return_mean_T": [
+ 188
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ }
+ ],
+ "return_min_T": [
+ 188
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.45283018867924524
+ }
+ ],
+ "return_std_T": [
+ 188
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 188
+ ],
+ "test_dead_allies_mean": [
+ 1.875
+ ],
+ "test_dead_allies_mean_T": [
+ 188
+ ],
+ "test_dead_enemies_mean": [
+ 0.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 188
+ ],
+ "test_ep_length_mean": [
+ 68.84375
+ ],
+ "test_ep_length_mean_T": [
+ 188
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ }
+ ],
+ "test_return_max_T": [
+ 188
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4033018867924528
+ }
+ ],
+ "test_return_mean_T": [
+ 188
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.22641509433962265
+ }
+ ],
+ "test_return_min_T": [
+ 188
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.27576191419676993
+ }
+ ],
+ "test_return_std_T": [
+ 188
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/5/metrics.json b/results/sacred/5m_vs_6m/qmix/5/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/5/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/5/run.json b/results/sacred/5m_vs_6m/qmix/5/run.json
new file mode 100644
index 0000000..f8831ed
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/5/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\nq_learner.py\", line 184, in train\n print('avg_time: ', self.avg_time)\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:15:38.197849",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:14:47.245762",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:15:38.201847"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/6/config.json b/results/sacred/5m_vs_6m/qmix/6/config.json
new file mode 100644
index 0000000..da55123
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/6/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 860875643,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/6/cout.txt b/results/sacred/5m_vs_6m/qmix/6/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/6/info.json b/results/sacred/5m_vs_6m/qmix/6/info.json
new file mode 100644
index 0000000..fcdcf69
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/6/info.json
@@ -0,0 +1,136 @@
+{
+ "battle_won_mean": [
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 196
+ ],
+ "dead_allies_mean": [
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 196
+ ],
+ "dead_enemies_mean": [
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 196
+ ],
+ "ep_length_mean": [
+ 24.5
+ ],
+ "ep_length_mean_T": [
+ 196
+ ],
+ "epsilon": [
+ 1.0
+ ],
+ "epsilon_T": [
+ 196
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2641509433962264
+ }
+ ],
+ "return_max_T": [
+ 196
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.2735849056603774
+ }
+ ],
+ "return_mean_T": [
+ 196
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4528301886792453
+ }
+ ],
+ "return_min_T": [
+ 196
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.5421861526604155
+ }
+ ],
+ "return_std_T": [
+ 196
+ ],
+ "test_battle_won_mean": [
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 196
+ ],
+ "test_dead_allies_mean": [
+ 0.21875
+ ],
+ "test_dead_allies_mean_T": [
+ 196
+ ],
+ "test_dead_enemies_mean": [
+ 0.03125
+ ],
+ "test_dead_enemies_mean_T": [
+ 196
+ ],
+ "test_ep_length_mean": [
+ 68.875
+ ],
+ "test_ep_length_mean_T": [
+ 196
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.433962264150943
+ }
+ ],
+ "test_return_max_T": [
+ 196
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.1497641509433962
+ }
+ ],
+ "test_return_mean_T": [
+ 196
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ }
+ ],
+ "test_return_min_T": [
+ 196
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.612298627906809
+ }
+ ],
+ "test_return_std_T": [
+ 196
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/6/metrics.json b/results/sacred/5m_vs_6m/qmix/6/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/6/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/6/run.json b/results/sacred/5m_vs_6m/qmix/6/run.json
new file mode 100644
index 0000000..f016f31
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/6/run.json
@@ -0,0 +1,127 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "fail_trace": [
+ "Traceback (most recent call last):\n",
+ " File \"C:\\Users\\Taiyo\\.conda\\envs\\SMACV2\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\main.py\", line 44, in my_main\n run_REGISTRY[_config['run']](_run, config, _log)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 128, in run\n run_sequential(args=args, logger=logger)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\run\\run.py\", line 280, in run_sequential\n learner.train(episode_sample, runner.t_env, episode)\n",
+ " File \"C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src\\learners\\nq_learner.py\", line 185, in train\n print(f\"Avg cost {self.avg_time} seconds\")\n",
+ "OSError: [WinError 1] \u529f\u80fd\u932f\u8aa4\u3002\n"
+ ],
+ "heartbeat": "2024-12-29T18:20:41.285841",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:19:50.281614",
+ "status": "FAILED",
+ "stop_time": "2024-12-29T18:20:41.289345"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/7/config.json b/results/sacred/5m_vs_6m/qmix/7/config.json
new file mode 100644
index 0000000..3ae44e8
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/7/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 124389269,
+ "t_max": 10050000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/7/cout.txt b/results/sacred/5m_vs_6m/qmix/7/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/7/info.json b/results/sacred/5m_vs_6m/qmix/7/info.json
new file mode 100644
index 0000000..915580e
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/7/info.json
@@ -0,0 +1,1525 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 4.99234693877551,
+ 4.997685185185185,
+ 4.997881355932203,
+ 4.996031746031746,
+ 5.0,
+ 5.0,
+ 5.0,
+ 4.998076923076923,
+ 4.979166666666667,
+ 5.0,
+ 5.0
+ ],
+ "dead_allies_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "dead_enemies_mean": [
+ 0.0,
+ 0.002551020408163265,
+ 0.0,
+ 0.019067796610169493,
+ 0.07936507936507936,
+ 0.20522388059701493,
+ 0.5441176470588235,
+ 0.9253731343283582,
+ 1.3673076923076923,
+ 1.5520833333333333,
+ 2.0705645161290325,
+ 2.2172131147540983
+ ],
+ "dead_enemies_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "ep_length_mean": [
+ 24.625,
+ 25.711734693877553,
+ 23.40972222222222,
+ 21.44279661016949,
+ 19.863095238095237,
+ 18.68097014925373,
+ 18.50551470588235,
+ 18.865671641791046,
+ 19.40576923076923,
+ 20.841666666666665,
+ 20.350806451612904,
+ 20.58811475409836
+ ],
+ "ep_length_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "episode": [
+ 392,
+ 824,
+ 1296,
+ 1800,
+ 2336,
+ 2880,
+ 3416,
+ 3936,
+ 4424,
+ 4920
+ ],
+ "episode_T": [
+ 10062,
+ 20224,
+ 30352,
+ 40369,
+ 50387,
+ 60447,
+ 70562,
+ 80651,
+ 90808,
+ 100902
+ ],
+ "episode_in_buffer": [
+ 392,
+ 824,
+ 1296,
+ 1800,
+ 2336,
+ 2880,
+ 3416,
+ 3936,
+ 4424,
+ 4920
+ ],
+ "episode_in_buffer_T": [
+ 10062,
+ 20224,
+ 30352,
+ 40369,
+ 50387,
+ 60447,
+ 70562,
+ 80651,
+ 90808,
+ 100902
+ ],
+ "epsilon": [
+ 1.0,
+ 0.904411,
+ 0.807872,
+ 0.7116560000000001,
+ 0.6164945000000001,
+ 0.5213235,
+ 0.4257535000000001,
+ 0.3296610000000001,
+ 0.23381550000000006,
+ 0.13887250000000007,
+ 0.05,
+ 0.05
+ ],
+ "epsilon_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "grad_norm": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MzE2ODBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDMxNjgwcQFhLgEAAAAAAAAAe+FqPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0NzgyNDBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDc4MjQwcQFhLgEAAAAAAAAAlJorPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MTI5NjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDEyOTYwcQFhLgEAAAAAAAAA7u6tPQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MTk4NzJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDE5ODcycQFhLgEAAAAAAAAAJZYJPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MDgyNTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDA4MjU2cQFhLgEAAAAAAAAA1cWEPQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDgzNTY1MTJxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4MzU2NTEycQFhLgEAAAAAAAAAsnhZPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0NDc0MjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDQ3NDI0cQFhLgEAAAAAAAAApaFZPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0NDEzNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDQxMzc2cQFhLgEAAAAAAAAAMEprPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDgzOTcxMjBxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4Mzk3MTIwcQFhLgEAAAAAAAAA96uzPQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MjIxNzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDIyMTc2cQFhLgEAAAAAAAAAup0/Pw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI0NDgxMDg0MTU2NDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNDQ4MTA4NDE1NjQ4cQFhLgEAAAAAAAAAyaguPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_T": [
+ 3349,
+ 13450,
+ 23528,
+ 33566,
+ 43692,
+ 53774,
+ 63866,
+ 73918,
+ 83931,
+ 94015,
+ 104021
+ ],
+ "loss_td": [
+ 0.07439330220222473,
+ 0.012946677394211292,
+ 0.008807389996945858,
+ 0.010632005520164967,
+ 0.012976271100342274,
+ 0.015445969067513943,
+ 0.019227299839258194,
+ 0.02684800885617733,
+ 0.02358916774392128,
+ 0.02601030468940735,
+ 0.02578692138195038
+ ],
+ "loss_td_T": [
+ 3349,
+ 13450,
+ 23528,
+ 33566,
+ 43692,
+ 53774,
+ 63866,
+ 73918,
+ 83931,
+ 94015,
+ 104021
+ ],
+ "q_taken_mean": [
+ 0.0063814661544840815,
+ 0.12279404830447903,
+ 0.19687937885954215,
+ 0.24386920861945066,
+ 0.28813512528498775,
+ 0.3253443931632747,
+ 0.38058596680420104,
+ 0.39270581816493316,
+ 0.45930085358796297,
+ 0.475043726142844,
+ 0.4870358108271916
+ ],
+ "q_taken_mean_T": [
+ 3349,
+ 13450,
+ 23528,
+ 33566,
+ 43692,
+ 53774,
+ 63866,
+ 73918,
+ 83931,
+ 94015,
+ 104021
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.2641509433962264
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.8490566037735845
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.622641509433962
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.792452830188679
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.698113207547169
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.830188679245283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.037735849056604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.490566037735851
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.528301886792452
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 10.113207547169814
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.433962264150946
+ }
+ ],
+ "return_max_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.641509433962264
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5145360030804775
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7919287211740043
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0869043811960344
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.458520515124289
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.9925373134328357
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.779689234184239
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.510419600112644
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.133744557329463
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.54630503144654
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.356588557516738
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.655273739560779
+ }
+ ],
+ "return_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4528301886792453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4528301886792453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.679245283018868
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.679245283018868
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5849056603773584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.0377358490566038
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.490566037735849
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.7169811320754715
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.943396226415094
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.30188679245283
+ }
+ ],
+ "return_min_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.33487244055280846
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.45973206614362516
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.49151227301761186
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.5922908628976341
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7157753366964114
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7848853470956797
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.927704916514746
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9080357506333651
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9562577313330906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.989722234073299
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9708493949314675
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.8465665686301762
+ }
+ ],
+ "return_std_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902,
+ 110949
+ ],
+ "target_mean": [
+ 0.0674623459194909,
+ 0.13363379898943775,
+ 0.19109473639572844,
+ 0.24021151264572022,
+ 0.29078516858119957,
+ 0.33207142979922993,
+ 0.37610051927044263,
+ 0.41077570695579496,
+ 0.4584343112244898,
+ 0.4591282365250184,
+ 0.49734687893958496
+ ],
+ "target_mean_T": [
+ 3349,
+ 13450,
+ 23528,
+ 33566,
+ 43692,
+ 53774,
+ 63866,
+ 73918,
+ 83931,
+ 94015,
+ 104021
+ ],
+ "td_error_abs": [
+ 0.07439329888294546,
+ 0.012946677783544363,
+ 0.008807389971743296,
+ 0.010632005982108103,
+ 0.012976271443014102,
+ 0.015445969416887978,
+ 0.019227300473647464,
+ 0.02684800940140927,
+ 0.023589168095895043,
+ 0.026010304185026617,
+ 0.025786921439751625
+ ],
+ "td_error_abs_T": [
+ 3349,
+ 13450,
+ 23528,
+ 33566,
+ 43692,
+ 53774,
+ 63866,
+ 73918,
+ 83931,
+ 94015,
+ 104021
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_dead_allies_mean": [
+ 0.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_dead_enemies_mean": [
+ 0.0,
+ 2.78125,
+ 1.65625,
+ 1.65625,
+ 2.0,
+ 2.03125,
+ 2.40625,
+ 2.96875,
+ 2.375,
+ 2.09375,
+ 2.15625
+ ],
+ "test_dead_enemies_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_ep_length_mean": [
+ 70.0,
+ 20.71875,
+ 18.5625,
+ 18.6875,
+ 18.90625,
+ 18.65625,
+ 19.40625,
+ 21.15625,
+ 20.59375,
+ 20.46875,
+ 20.40625
+ ],
+ "test_ep_length_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.528301886792454
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.132075471698113
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.264150943396228
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.037735849056604
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.584905660377358
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.16981132075472
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 11.28301886792453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.811320754716981
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.716981132075473
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.716981132075473
+ }
+ ],
+ "test_return_max_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.123820754716982
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.878537735849056
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.359669811320755
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.818396226415095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.508254716981132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.869103773584905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.880896226415095
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.924528301886792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.6450471698113205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.781839622641509
+ }
+ ],
+ "test_return_mean_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.509433962264151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.754716981132075
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.2075471698113205
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.509433962264151
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.471698113207546
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.924528301886792
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.698113207547169
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.69811320754717
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.056603773584905
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.471698113207546
+ }
+ ],
+ "test_return_min_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6854235771791629
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.48147734328135483
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7221748564086867
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6617531154677376
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6113167055974683
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7026550608075507
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.2531238777938873
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.49210571153754623
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7761324467181493
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.7051679731294659
+ }
+ ],
+ "test_return_std_T": [
+ 197,
+ 10276,
+ 20389,
+ 30510,
+ 40521,
+ 50534,
+ 60601,
+ 70713,
+ 80804,
+ 90808,
+ 100902
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/7/metrics.json b/results/sacred/5m_vs_6m/qmix/7/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/7/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/7/run.json b/results/sacred/5m_vs_6m/qmix/7/run.json
new file mode 100644
index 0000000..d5cb20e
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/7/run.json
@@ -0,0 +1,118 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T18:29:09.160676",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 8,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 10050000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=8",
+ "buffer_size=5000",
+ "t_max=10050000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:23:08.096157",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T18:29:09.182233"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/8/config.json b/results/sacred/5m_vs_6m/qmix/8/config.json
new file mode 100644
index 0000000..b57cbfb
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/8/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 82495426,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/8/cout.txt b/results/sacred/5m_vs_6m/qmix/8/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/8/info.json b/results/sacred/5m_vs_6m/qmix/8/info.json
new file mode 100644
index 0000000..349aa45
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/8/info.json
@@ -0,0 +1,672 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 4.994949494949495,
+ 4.997727272727273,
+ 4.9978991596638656,
+ 4.996
+ ],
+ "dead_allies_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "dead_enemies_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.018907563025210083,
+ 0.124
+ ],
+ "dead_enemies_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "ep_length_mean": [
+ 27.0,
+ 25.391414141414142,
+ 22.920454545454547,
+ 21.014705882352942,
+ 20.076
+ ],
+ "ep_length_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "episode": [
+ 396,
+ 832,
+ 1308,
+ 1808
+ ],
+ "episode_T": [
+ 10068,
+ 20069,
+ 30094,
+ 40134
+ ],
+ "episode_in_buffer": [
+ 396,
+ 832,
+ 1308,
+ 1808
+ ],
+ "episode_in_buffer_T": [
+ 10068,
+ 20069,
+ 30094,
+ 40134
+ ],
+ "epsilon": [
+ 1.0,
+ 0.904354,
+ 0.8084800000000001,
+ 0.7133565000000001,
+ 0.6179955
+ ],
+ "epsilon_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "grad_norm": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI5Mzc2MjcxMTk2OTZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyOTM3NjI3MTE5Njk2cQFhLgEAAAAAAAAAwgPxPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI5Mzc2MjcxNjc5ODRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyOTM3NjI3MTY3OTg0cQFhLgEAAAAAAAAAjyhPPQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI5Mzc2MjcxNTQ1NDRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyOTM3NjI3MTU0NTQ0cQFhLgEAAAAAAAAARv11PQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI5Mzc2MjcxMTQ2MDhxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyOTM3NjI3MTE0NjA4cQFhLgEAAAAAAAAAZGYAPg=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_T": [
+ 3390,
+ 13498,
+ 23565,
+ 33599
+ ],
+ "loss_td": [
+ 0.029275426641106606,
+ 0.008867704309523106,
+ 0.00828527007251978,
+ 0.011645584367215633
+ ],
+ "loss_td_T": [
+ 3390,
+ 13498,
+ 23565,
+ 33599
+ ],
+ "q_taken_mean": [
+ 0.010331014897619378,
+ 0.13366268382352942,
+ 0.20415240698298645,
+ 0.26490462531228465
+ ],
+ "q_taken_mean_T": [
+ 3390,
+ 13498,
+ 23565,
+ 33599
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5849056603773584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3962264150943393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3962264150943393
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.30188679245283
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867924
+ }
+ ],
+ "return_max_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.3018867924528301
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5191538021726703
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.7969125214408233
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.1112256223244015
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 2.654641509433962
+ }
+ ],
+ "return_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9056603773584906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4528301886792453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.679245283018868
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9056603773584906
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ }
+ ],
+ "return_min_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.29412183524754515
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.45845612324356316
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.5206001167133067
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6097681028977217
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.765011763917395
+ }
+ ],
+ "return_std_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "target_mean": [
+ 0.046066559648091816,
+ 0.13057054290381234,
+ 0.20175707048026098,
+ 0.2706919190967436
+ ],
+ "target_mean_T": [
+ 3390,
+ 13498,
+ 23565,
+ 33599
+ ],
+ "td_error_abs": [
+ 0.029275427174075868,
+ 0.008867704045132298,
+ 0.008285269786077323,
+ 0.011645584579667578
+ ],
+ "td_error_abs_T": [
+ 3390,
+ 13498,
+ 23565,
+ 33599
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_dead_allies_mean": [
+ 2.59375,
+ 5.0,
+ 5.0,
+ 5.0,
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_dead_enemies_mean": [
+ 0.0625,
+ 2.125,
+ 2.28125,
+ 1.90625,
+ 2.0625
+ ],
+ "test_dead_enemies_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_ep_length_mean": [
+ 60.59375,
+ 18.9375,
+ 19.96875,
+ 18.53125,
+ 18.96875
+ ],
+ "test_ep_length_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.113207547169811
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.943396226415096
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.433962264150944
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.811320754716981
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 8.49056603773585
+ }
+ ],
+ "test_return_max_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.283018867924528
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.964622641509434
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 7.1120283018867925
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.475235849056602
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.799528301886792
+ }
+ ],
+ "test_return_mean_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 4.981132075471698
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867924
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867924
+ }
+ ],
+ "test_return_min_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.141821257893222
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.9287992177150309
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.0831398416674247
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.6995052016567793
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.8086527819915745
+ }
+ ],
+ "test_return_std_T": [
+ 108,
+ 10163,
+ 20248,
+ 30251,
+ 40289
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/8/metrics.json b/results/sacred/5m_vs_6m/qmix/8/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/8/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/8/run.json b/results/sacred/5m_vs_6m/qmix/8/run.json
new file mode 100644
index 0000000..7feb511
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/8/run.json
@@ -0,0 +1,118 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T18:33:07.470727",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:29:40.087689",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T18:33:07.478265"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/9/config.json b/results/sacred/5m_vs_6m/qmix/9/config.json
new file mode 100644
index 0000000..cd21160
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/9/config.json
@@ -0,0 +1,96 @@
+{
+ "action_selector": "epsilon_greedy",
+ "agent": "n_rnn",
+ "agent_output_type": "q",
+ "asn_hidden_size": 32,
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_cpu_only": true,
+ "buffer_size": 5000,
+ "checkpoint_path": "",
+ "cpu_inference": true,
+ "critic_lr": 0.0005,
+ "double_q": true,
+ "enable_parallel_computing": false,
+ "env": "sc2",
+ "env_args": {
+ "continuing_episode": false,
+ "debug": false,
+ "difficulty": "7",
+ "game_version": null,
+ "heuristic_ai": false,
+ "heuristic_rest": false,
+ "map_name": "5m_vs_6m",
+ "move_amount": 2,
+ "obs_all_health": true,
+ "obs_instead_of_state": false,
+ "obs_last_action": false,
+ "obs_own_health": true,
+ "obs_pathing_grid": false,
+ "obs_terrain_height": false,
+ "obs_timestep_number": false,
+ "replay_dir": "",
+ "replay_prefix": "",
+ "reward_death_value": 10,
+ "reward_defeat": 0,
+ "reward_negative_scale": 0.5,
+ "reward_only_positive": true,
+ "reward_scale": true,
+ "reward_scale_rate": 20,
+ "reward_sparse": false,
+ "reward_win": 200,
+ "seed": null,
+ "state_last_action": true,
+ "state_timestep_number": false,
+ "step_mul": 8
+ },
+ "epsilon_anneal_time": 100000,
+ "epsilon_finish": 0.05,
+ "epsilon_start": 1.0,
+ "evaluate": false,
+ "gain": 0.01,
+ "gamma": 0.99,
+ "grad_norm_clip": 10,
+ "hypernet_embed": 64,
+ "label": "default_label",
+ "learner": "nq_learner",
+ "learner_log_interval": 10000,
+ "load_step": 0,
+ "local_results_path": "results",
+ "log_interval": 10000,
+ "lr": 0.001,
+ "mac": "n_mac",
+ "mixer": "qmix",
+ "mixing_embed_dim": 32,
+ "name": "qmix",
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "optim_alpha": 0.99,
+ "optim_eps": 1e-05,
+ "optimizer": "adam",
+ "per_alpha": 0.6,
+ "per_beta": 0.4,
+ "q_lambda": false,
+ "repeat_id": 1,
+ "return_priority": false,
+ "rnn_hidden_dim": 64,
+ "run": "default",
+ "runner": "parallel",
+ "runner_log_interval": 10000,
+ "save_model": true,
+ "save_model_interval": 2000000,
+ "save_replay": false,
+ "seed": 813146773,
+ "t_max": 100000,
+ "target_update_interval": 200,
+ "td_lambda": 0.6,
+ "test_greedy": true,
+ "test_interval": 10000,
+ "test_nepisode": 32,
+ "thread_num": 4,
+ "use_cuda": true,
+ "use_layer_norm": false,
+ "use_orthogonal": false,
+ "use_per": false,
+ "use_tensorboard": true
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/9/cout.txt b/results/sacred/5m_vs_6m/qmix/9/cout.txt
new file mode 100644
index 0000000..e69de29
diff --git a/results/sacred/5m_vs_6m/qmix/9/info.json b/results/sacred/5m_vs_6m/qmix/9/info.json
new file mode 100644
index 0000000..ffa4b95
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/9/info.json
@@ -0,0 +1,348 @@
+{
+ "battle_won_mean": [
+ 0.0,
+ 0.0
+ ],
+ "battle_won_mean_T": [
+ 91,
+ 10157
+ ],
+ "dead_allies_mean": [
+ 5.0,
+ 4.994897959183674
+ ],
+ "dead_allies_mean_T": [
+ 91,
+ 10157
+ ],
+ "dead_enemies_mean": [
+ 0.0,
+ 0.0
+ ],
+ "dead_enemies_mean_T": [
+ 91,
+ 10157
+ ],
+ "ep_length_mean": [
+ 22.75,
+ 25.678571428571427
+ ],
+ "ep_length_mean_T": [
+ 91,
+ 10157
+ ],
+ "episode": [
+ 392
+ ],
+ "episode_T": [
+ 10066
+ ],
+ "episode_in_buffer": [
+ 392
+ ],
+ "episode_in_buffer_T": [
+ 10066
+ ],
+ "epsilon": [
+ 1.0,
+ 0.904373
+ ],
+ "epsilon_T": [
+ 91,
+ 10157
+ ],
+ "grad_norm": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI3NTczMjk5NzQ3MzZxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNzU3MzI5OTc0NzM2cQFhLgEAAAAAAAAAxFokPw=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch._utils._rebuild_tensor_v2"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/reduce": [
+ {
+ "py/function": "torch.storage._load_from_bytes"
+ },
+ {
+ "py/tuple": [
+ {
+ "py/b64": "gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAAAGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAAaW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApGbG9hdFN0b3JhZ2UKcQFYDQAAADI3NTczMzAwMDc2NjRxAlgGAAAAY3VkYTowcQNLAU50cQRRLoACXXEAWA0AAAAyNzU3MzMwMDA3NjY0cQFhLgEAAAAAAAAAMfg7PQ=="
+ }
+ ]
+ }
+ ]
+ },
+ 0,
+ {
+ "py/tuple": []
+ },
+ {
+ "py/tuple": []
+ },
+ false,
+ {
+ "py/reduce": [
+ {
+ "py/type": "collections.OrderedDict"
+ },
+ {
+ "py/tuple": []
+ },
+ null,
+ null,
+ {
+ "py/tuple": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "grad_norm_T": [
+ 3449,
+ 13477
+ ],
+ "loss_td": [
+ 0.04804620519280434,
+ 0.008500373922288418
+ ],
+ "loss_td_T": [
+ 3449,
+ 13477
+ ],
+ "q_taken_mean": [
+ 0.029340348170438894,
+ 0.13147327403257877
+ ],
+ "q_taken_mean_T": [
+ 3449,
+ 13477
+ ],
+ "return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5849056603773584
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 3.3962264150943393
+ }
+ ],
+ "return_max_T": [
+ 91,
+ 10157
+ ],
+ "return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.1320754716981132
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 1.5294570658452062
+ }
+ ],
+ "return_mean_T": [
+ 91,
+ 10157
+ ],
+ "return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4528301886792453
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.679245283018868
+ }
+ ],
+ "return_min_T": [
+ 91,
+ 10157
+ ],
+ "return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4235838551064839
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.4868555532060793
+ }
+ ],
+ "return_std_T": [
+ 91,
+ 10157
+ ],
+ "target_mean": [
+ 0.0754477704701544,
+ 0.13361987431649586
+ ],
+ "target_mean_T": [
+ 3449,
+ 13477
+ ],
+ "td_error_abs": [
+ 0.04804620607309184,
+ 0.008500374286099652
+ ],
+ "td_error_abs_T": [
+ 3449,
+ 13477
+ ],
+ "test_battle_won_mean": [
+ 0.0,
+ 0.0
+ ],
+ "test_battle_won_mean_T": [
+ 91,
+ 10157
+ ],
+ "test_dead_allies_mean": [
+ 0.0,
+ 5.0
+ ],
+ "test_dead_allies_mean_T": [
+ 91,
+ 10157
+ ],
+ "test_dead_enemies_mean": [
+ 0.0,
+ 2.0
+ ],
+ "test_dead_enemies_mean_T": [
+ 91,
+ 10157
+ ],
+ "test_ep_length_mean": [
+ 70.0,
+ 18.71875
+ ],
+ "test_ep_length_mean_T": [
+ 91,
+ 10157
+ ],
+ "test_return_max": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 9.433962264150942
+ }
+ ],
+ "test_return_max_T": [
+ 91,
+ 10157
+ ],
+ "test_return_mean": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 6.662735849056603
+ }
+ ],
+ "test_return_mean_T": [
+ 91,
+ 10157
+ ],
+ "test_return_min": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 5.245283018867925
+ }
+ ],
+ "test_return_min_T": [
+ 91,
+ 10157
+ ],
+ "test_return_std": [
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.0
+ },
+ {
+ "dtype": "float64",
+ "py/object": "numpy.float64",
+ "value": 0.8664331680367857
+ }
+ ],
+ "test_return_std_T": [
+ 91,
+ 10157
+ ]
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/9/metrics.json b/results/sacred/5m_vs_6m/qmix/9/metrics.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/9/metrics.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/9/run.json b/results/sacred/5m_vs_6m/qmix/9/run.json
new file mode 100644
index 0000000..697d6f5
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/9/run.json
@@ -0,0 +1,118 @@
+{
+ "artifacts": [],
+ "command": "my_main",
+ "experiment": {
+ "base_dir": "C:\\Users\\Taiyo\\Desktop\\SMAC V2\\pymarl3\\src",
+ "dependencies": [
+ "numpy==1.23.1",
+ "PyYAML==6.0.2",
+ "sacred==0.8.7",
+ "torch==1.13.1+cu117"
+ ],
+ "mainfile": "main.py",
+ "name": "pymarl",
+ "repositories": [
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ },
+ {
+ "commit": "44bb188185fd0292a1a306c86396027efb44224d",
+ "dirty": true,
+ "url": "https://github.com/tjuHaoXiaotian/pymarl3.git"
+ }
+ ],
+ "sources": [
+ [
+ "main.py",
+ "_sources\\main_888918ff84cf3bc1e6e9dcc8919870b2.py"
+ ],
+ [
+ "utils\\logging.py",
+ "_sources\\logging_f71df6d788e929fac28afdf951d63d54.py"
+ ]
+ ]
+ },
+ "heartbeat": "2024-12-29T18:35:59.537230",
+ "host": {
+ "ENV": {},
+ "cpu": "AMD Ryzen 7 5700X3D 8-Core Processor",
+ "gpus": {
+ "driver_version": "560.94",
+ "gpus": [
+ {
+ "model": "NVIDIA GeForce RTX 4080 SUPER",
+ "persistence_mode": false,
+ "total_memory": 16376
+ }
+ ]
+ },
+ "hostname": "Taiyopen",
+ "os": [
+ "Windows",
+ "Windows-10-10.0.22631-SP0"
+ ],
+ "python_version": "3.10.16"
+ },
+ "meta": {
+ "command": "my_main",
+ "config_updates": {
+ "batch_size": 128,
+ "batch_size_run": 4,
+ "buffer_size": 5000,
+ "env_args": {
+ "map_name": "5m_vs_6m"
+ },
+ "epsilon_anneal_time": 100000,
+ "obs_agent_id": true,
+ "obs_last_action": true,
+ "runner": "parallel",
+ "t_max": 100000,
+ "td_lambda": 0.6
+ },
+ "named_configs": [],
+ "options": {
+ "--beat-interval": null,
+ "--capture": null,
+ "--comment": null,
+ "--debug": false,
+ "--enforce_clean": false,
+ "--file_storage": null,
+ "--force": false,
+ "--help": false,
+ "--id": null,
+ "--loglevel": null,
+ "--mongo_db": null,
+ "--name": null,
+ "--pdb": false,
+ "--print-config": false,
+ "--priority": null,
+ "--queue": false,
+ "--s3": null,
+ "--sql": null,
+ "--tiny_db": null,
+ "--unobserved": false,
+ "COMMAND": null,
+ "UPDATE": [
+ "env_args.map_name=5m_vs_6m",
+ "obs_agent_id=True",
+ "obs_last_action=True",
+ "runner=parallel",
+ "batch_size_run=4",
+ "buffer_size=5000",
+ "t_max=100000",
+ "epsilon_anneal_time=100000",
+ "batch_size=128",
+ "td_lambda=0.6"
+ ],
+ "help": false,
+ "with": true
+ }
+ },
+ "resources": [],
+ "result": null,
+ "start_time": "2024-12-29T18:34:18.151259",
+ "status": "INTERRUPTED",
+ "stop_time": "2024-12-29T18:35:59.543818"
+}
\ No newline at end of file
diff --git a/results/sacred/5m_vs_6m/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py b/results/sacred/5m_vs_6m/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
new file mode 100644
index 0000000..5393b7f
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/_sources/logging_f71df6d788e929fac28afdf951d63d54.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+import logging
+import numpy as np
+import torch as th
+
+class Logger:
+ def __init__(self, console_logger):
+ self.console_logger = console_logger
+
+ self.use_tb = False
+ self.use_sacred = False
+ self.use_hdf = False
+
+ self.stats = defaultdict(lambda: [])
+
+ def setup_tb(self, directory_name):
+ # Import here so it doesn't have to be installed if you don't use it
+ from tensorboard_logger import configure, log_value
+ configure(directory_name)
+ self.tb_logger = log_value
+ self.use_tb = True
+
+ def setup_sacred(self, sacred_run_dict):
+ self.sacred_info = sacred_run_dict.info
+ self.use_sacred = True
+
+ def log_stat(self, key, value, t, to_sacred=True):
+ self.stats[key].append((t, value))
+
+ if self.use_tb:
+ self.tb_logger(key, value, t)
+
+ if self.use_sacred and to_sacred:
+ if key in self.sacred_info:
+ self.sacred_info["{}_T".format(key)].append(t)
+ self.sacred_info[key].append(value)
+ else:
+ self.sacred_info["{}_T".format(key)] = [t]
+ self.sacred_info[key] = [value]
+
+ def print_recent_stats(self):
+ log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
+ i = 0
+ for (k, v) in sorted(self.stats.items()):
+ if k == "episode":
+ continue
+ i += 1
+ window = 5 if k != "epsilon" else 1
+ item = "{:.4f}".format(th.mean(th.tensor([float(x[1]) for x in self.stats[k][-window:]])))
+ log_str += "{:<25}{:>8}".format(k + ":", item)
+ log_str += "\n" if i % 4 == 0 else "\t"
+ self.console_logger.info(log_str)
+ # Reset stats to avoid accumulating logs in memory
+ self.stats = defaultdict(lambda: [])
+
+
+# set up a custom logger
+def get_logger():
+ logger = logging.getLogger()
+ logger.handlers = []
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel('DEBUG')
+
+ return logger
+
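Editor's note (annotation, not part of the patch): the Logger.log_stat method above is what produces the paired "<key>" / "<key>_T" arrays seen in the info.json files earlier in this diff — each call appends the logged value under the metric name and the environment timestep under the same name with a "_T" suffix. A minimal runnable sketch of that pairing follows, using a plain dict as a stand-in for the Sacred run's info dict (the real object is sacred_run_dict.info, wired up in setup_sacred above); the example values are taken from the run 7 info.json in this diff.

# Minimal sketch of the value / <key>_T pairing used by Logger.log_stat above.
# A plain dict stands in for the Sacred run's info dict.
sacred_info = {}

def log_stat(key, value, t):
    # Append the value under `key` and the timestep under `key + "_T"`.
    if key in sacred_info:
        sacred_info["{}_T".format(key)].append(t)
        sacred_info[key].append(value)
    else:
        sacred_info["{}_T".format(key)] = [t]
        sacred_info[key] = [value]

log_stat("return_mean", 1.6415094339622642, 197)
log_stat("return_mean", 1.5145360030804775, 10276)
print(sacred_info)
# {'return_mean_T': [197, 10276], 'return_mean': [1.6415094339622642, 1.5145360030804775]}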
diff --git a/results/sacred/5m_vs_6m/qmix/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py b/results/sacred/5m_vs_6m/qmix/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py
new file mode 100644
index 0000000..47de339
--- /dev/null
+++ b/results/sacred/5m_vs_6m/qmix/_sources/main_888918ff84cf3bc1e6e9dcc8919870b2.py
@@ -0,0 +1,124 @@
+import random
+
+import numpy as np
+import os
+import collections
+from os.path import dirname, abspath, join
+from copy import deepcopy
+from sacred import Experiment, SETTINGS
+from sacred.observers import FileStorageObserver
+from sacred.utils import apply_backspaces_and_linefeeds
+import sys
+import torch as th
+from utils.logging import get_logger
+import yaml
+import collections.abc
+
+from run import REGISTRY as run_REGISTRY
+
+SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
+logger = get_logger()
+
+ex = Experiment("pymarl")
+ex.logger = logger
+ex.captured_out_filter = apply_backspaces_and_linefeeds
+
+results_path = join(dirname(dirname(abspath(__file__))))
+
+
+@ex.main
+def my_main(_run, _config, _log):
+ # Setting the random seed throughout the modules
+ config = config_copy(_config)
+ random.seed(config["seed"])
+ np.random.seed(config["seed"])
+ th.manual_seed(config["seed"])
+ th.cuda.manual_seed(config["seed"])
+ # th.cuda.manual_seed_all(config["seed"])
+    th.backends.cudnn.deterministic = True  # make cuDNN deterministic for reproducibility
+
+
+ config['env_args']['seed'] = config["seed"]
+
+ # run
+ run_REGISTRY[_config['run']](_run, config, _log)
+
+
+def _get_config(params, arg_name, subfolder):
+ config_name = None
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0] == arg_name:
+ config_name = _v.split("=")[1]
+ del params[_i]
+ break
+
+ if config_name is not None:
+ with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)),
+ "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "{}.yaml error: {}".format(config_name, exc)
+ return config_dict
+
+
+def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, collections.abc.Mapping):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+
+def config_copy(config):
+ if isinstance(config, dict):
+ return {k: config_copy(v) for k, v in config.items()}
+ elif isinstance(config, list):
+ return [config_copy(v) for v in config]
+ else:
+ return deepcopy(config)
+
+
+def parse_command(params, key, default):
+ result = default
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0].strip() == key:
+ result = _v[_v.index('=') + 1:].strip()
+ break
+ return result
+
+
+if __name__ == '__main__':
+ params = deepcopy(sys.argv)
+
+ # Get the defaults from default.yaml
+ with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "default.yaml error: {}".format(exc)
+
+ # Load algorithm and env base configs
+ env_config = _get_config(params, "--env-config", "envs")
+ alg_config = _get_config(params, "--config", "algs")
+ # config_dict = {**config_dict, **env_config, **alg_config}
+ config_dict = recursive_dict_update(config_dict, env_config)
+ config_dict = recursive_dict_update(config_dict, alg_config)
+
+ # now add all the config to sacred
+ ex.add_config(config_dict)
+
+ # Save to disk by default for sacred
+ map_name = parse_command(params, "env_args.map_name", config_dict['env_args']['map_name'])
+ algo_name = parse_command(params, "name", config_dict['name'])
+ local_results_path = parse_command(params, "local_results_path", config_dict['local_results_path'])
+ file_obs_path = join(results_path, local_results_path, "sacred", map_name, algo_name)
+
+ logger.info("Saving to FileStorageObserver in {}.".format(file_obs_path))
+ ex.observers.append(FileStorageObserver.create(file_obs_path))
+
+ ex.run_commandline(params)
+
+ # flush
+ sys.stdout.flush()
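Editor's note (annotation, not part of the patch): the FileStorageObserver configured in main.py above is what writes the results/sacred/<map_name>/<algo_name>/<run_id>/ directories added throughout this diff (config.json, run.json, info.json, metrics.json, cout.txt, plus the _sources/ snapshots). Below is a hedged usage sketch for reading one of those info.json files back and pairing a metric with its timestep axis; the path is illustrative (any of the info.json files in this diff would work), and the unwrapping only handles the plain-float and the jsonpickle-wrapped numpy.float64 entries visible in these files.

import json

# Illustrative path: any results/sacred/<map>/<algo>/<run>/info.json from this diff.
with open("results/sacred/5m_vs_6m/qmix/8/info.json") as f:
    info = json.load(f)

def unwrap(v):
    # jsonpickle stores numpy.float64 values as {"py/object": ..., "value": ...},
    # while plain Python floats are stored as-is. Other wrapped objects
    # (e.g. the grad_norm tensors) are skipped here.
    if isinstance(v, dict):
        return v.get("value")
    return v if isinstance(v, (int, float)) else None

for t, r in zip(info["test_return_mean_T"], info["test_return_mean"]):
    print(t, unwrap(r))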
diff --git a/results/tb_logs/feudal__2024-12-30_02-43-20/events.out.tfevents.1735497800.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-43-20/events.out.tfevents.1735497800.Taiyopen
new file mode 100644
index 0000000..fc54f1b
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-43-20/events.out.tfevents.1735497800.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-43-50/events.out.tfevents.1735497830.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-43-50/events.out.tfevents.1735497830.Taiyopen
new file mode 100644
index 0000000..1e174b3
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-43-50/events.out.tfevents.1735497830.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-44-09/events.out.tfevents.1735497849.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-44-09/events.out.tfevents.1735497849.Taiyopen
new file mode 100644
index 0000000..5335d60
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-44-09/events.out.tfevents.1735497849.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-46-05/events.out.tfevents.1735497965.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-46-05/events.out.tfevents.1735497965.Taiyopen
new file mode 100644
index 0000000..171b984
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-46-05/events.out.tfevents.1735497965.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-48-10/events.out.tfevents.1735498090.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-48-10/events.out.tfevents.1735498090.Taiyopen
new file mode 100644
index 0000000..2289aa3
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-48-10/events.out.tfevents.1735498090.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-49-13/events.out.tfevents.1735498153.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-49-13/events.out.tfevents.1735498153.Taiyopen
new file mode 100644
index 0000000..639b6e3
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-49-13/events.out.tfevents.1735498153.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-50-58/events.out.tfevents.1735498258.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-50-58/events.out.tfevents.1735498258.Taiyopen
new file mode 100644
index 0000000..d48b7a3
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-50-58/events.out.tfevents.1735498258.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-52-12/events.out.tfevents.1735498332.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-52-12/events.out.tfevents.1735498332.Taiyopen
new file mode 100644
index 0000000..485b613
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-52-12/events.out.tfevents.1735498332.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-58-22/events.out.tfevents.1735498702.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-58-22/events.out.tfevents.1735498702.Taiyopen
new file mode 100644
index 0000000..a04991f
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-58-22/events.out.tfevents.1735498702.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_02-59-31/events.out.tfevents.1735498771.Taiyopen b/results/tb_logs/feudal__2024-12-30_02-59-31/events.out.tfevents.1735498771.Taiyopen
new file mode 100644
index 0000000..fdceffc
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_02-59-31/events.out.tfevents.1735498771.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-00-37/events.out.tfevents.1735498837.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-00-37/events.out.tfevents.1735498837.Taiyopen
new file mode 100644
index 0000000..741d390
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-00-37/events.out.tfevents.1735498837.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-02-24/events.out.tfevents.1735498944.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-02-24/events.out.tfevents.1735498944.Taiyopen
new file mode 100644
index 0000000..dbcd5ae
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-02-24/events.out.tfevents.1735498944.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-10-51/events.out.tfevents.1735499451.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-10-51/events.out.tfevents.1735499451.Taiyopen
new file mode 100644
index 0000000..dacbf96
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-10-51/events.out.tfevents.1735499451.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-11-26/events.out.tfevents.1735499486.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-11-26/events.out.tfevents.1735499486.Taiyopen
new file mode 100644
index 0000000..fccb184
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-11-26/events.out.tfevents.1735499486.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-12-44/events.out.tfevents.1735499564.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-12-44/events.out.tfevents.1735499564.Taiyopen
new file mode 100644
index 0000000..6343d85
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-12-44/events.out.tfevents.1735499564.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-14-24/events.out.tfevents.1735499664.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-14-24/events.out.tfevents.1735499664.Taiyopen
new file mode 100644
index 0000000..77b6a15
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-14-24/events.out.tfevents.1735499664.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-18-05/events.out.tfevents.1735499885.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-18-05/events.out.tfevents.1735499885.Taiyopen
new file mode 100644
index 0000000..55409dc
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-18-05/events.out.tfevents.1735499885.Taiyopen differ
diff --git a/results/tb_logs/feudal__2024-12-30_03-19-47/events.out.tfevents.1735499987.Taiyopen b/results/tb_logs/feudal__2024-12-30_03-19-47/events.out.tfevents.1735499987.Taiyopen
new file mode 100644
index 0000000..3730b13
Binary files /dev/null and b/results/tb_logs/feudal__2024-12-30_03-19-47/events.out.tfevents.1735499987.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-29-40/events.out.tfevents.1735496980.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-29-40/events.out.tfevents.1735496980.Taiyopen
new file mode 100644
index 0000000..6794924
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-29-40/events.out.tfevents.1735496980.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-34-18/events.out.tfevents.1735497258.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-34-18/events.out.tfevents.1735497258.Taiyopen
new file mode 100644
index 0000000..4e5a7c1
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=4/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-34-18/events.out.tfevents.1735497258.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-06-33/events.out.tfevents.1735495593.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-06-33/events.out.tfevents.1735495593.Taiyopen
new file mode 100644
index 0000000..e50c7ce
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-06-33/events.out.tfevents.1735495593.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-07-51/events.out.tfevents.1735495671.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-07-51/events.out.tfevents.1735495671.Taiyopen
new file mode 100644
index 0000000..53333ae
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-07-51/events.out.tfevents.1735495671.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-08-44/events.out.tfevents.1735495724.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-08-44/events.out.tfevents.1735495724.Taiyopen
new file mode 100644
index 0000000..0d389f1
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-08-44/events.out.tfevents.1735495724.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-11-28/events.out.tfevents.1735495888.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-11-28/events.out.tfevents.1735495888.Taiyopen
new file mode 100644
index 0000000..43bcb8f
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-11-28/events.out.tfevents.1735495888.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-14-47/events.out.tfevents.1735496087.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-14-47/events.out.tfevents.1735496087.Taiyopen
new file mode 100644
index 0000000..fdd9b58
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-14-47/events.out.tfevents.1735496087.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-19-50/events.out.tfevents.1735496390.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-19-50/events.out.tfevents.1735496390.Taiyopen
new file mode 100644
index 0000000..297f59b
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-19-50/events.out.tfevents.1735496390.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-23-08/events.out.tfevents.1735496588.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-23-08/events.out.tfevents.1735496588.Taiyopen
new file mode 100644
index 0000000..45fb04f
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_02-23-08/events.out.tfevents.1735496588.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-15-59/events.out.tfevents.1735499759.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-15-59/events.out.tfevents.1735499759.Taiyopen
new file mode 100644
index 0000000..cd47155
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-15-59/events.out.tfevents.1735499759.Taiyopen differ
diff --git a/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-17-02/events.out.tfevents.1735499822.Taiyopen b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-17-02/events.out.tfevents.1735499822.Taiyopen
new file mode 100644
index 0000000..af7a148
Binary files /dev/null and b/results/tb_logs/sc2_5m_vs_6m-obs_aid=1-obs_act=1/algo=qmix-agent=n_rnn/env_n=8/mixer=qmix/rnn_dim=64-2bs=5000_128-tdlambda=0.6-epdec_0.05=100k/qmix__2024-12-30_03-17-02/events.out.tfevents.1735499822.Taiyopen differ
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/components/__init__.py b/src/components/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/components/action_selectors.py b/src/components/action_selectors.py
new file mode 100644
index 0000000..33ee935
--- /dev/null
+++ b/src/components/action_selectors.py
@@ -0,0 +1,185 @@
+import torch as th
+from torch.distributions import Categorical
+from torch.distributions.one_hot_categorical import OneHotCategorical
+
+from .epsilon_schedules import DecayThenFlatSchedule
+
+
+class GumbelSoftmax(OneHotCategorical):
+
+ def __init__(self, logits, probs=None, temperature=1):
+ super(GumbelSoftmax, self).__init__(logits=logits, probs=probs)
+ self.eps = 1e-20
+ self.temperature = temperature
+
+ def sample_gumbel(self):
+ U = self.logits.clone()
+ U.uniform_(0, 1)
+ return -th.log(-th.log(U + self.eps))
+
+ def gumbel_softmax_sample(self):
+ y = self.logits + self.sample_gumbel()
+ return th.softmax(y / self.temperature, dim=-1)
+
+ def hard_gumbel_softmax_sample(self):
+ y = self.gumbel_softmax_sample()
+ return (th.max(y, dim=-1, keepdim=True)[0] == y).float()
+
+ def rsample(self):
+ return self.gumbel_softmax_sample()
+
+ def sample(self):
+ return self.rsample().detach()
+
+ def hard_sample(self):
+ return self.hard_gumbel_softmax_sample()
+
+
+def multinomial_entropy(logits):
+ assert logits.size(-1) > 1
+ return GumbelSoftmax(logits=logits).entropy()
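+
+# Shape sketch (illustrative logits, not tied to a particular agent output):
+# logits = th.randn(4, 3, 6) # [batch x agents x actions]
+# GumbelSoftmax(logits=logits).sample().shape # -> [4, 3, 6]; rows are relaxed (softmax) samples
+# GumbelSoftmax(logits=logits).hard_sample() # exact one-hot rows via argmax of the relaxed sample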
+
+
+REGISTRY = {}
+
+
+class GumbelSoftmaxMultinomialActionSelector():
+
+ def __init__(self, args):
+ self.args = args
+
+ self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
+ decay="linear")
+ self.epsilon = self.schedule.eval(0)
+ self.test_greedy = getattr(args, "test_greedy", True)
+ self.save_probs = getattr(self.args, 'save_probs', False)
+
+ def select_action(self, agent_logits, avail_actions, t_env, test_mode=False):
+ masked_policies = agent_logits.clone()
+ self.epsilon = self.schedule.eval(t_env)
+
+ if test_mode and self.test_greedy:
+ picked_actions = masked_policies.max(dim=2)[1]
+ else:
+ picked_actions = GumbelSoftmax(logits=masked_policies).sample()
+ picked_actions = th.argmax(picked_actions, dim=-1).long()
+
+ if self.save_probs:
+ return picked_actions, masked_policies
+ else:
+ return picked_actions
+
+
+REGISTRY["gumbel"] = GumbelSoftmaxMultinomialActionSelector
+
+
+class MultinomialActionSelector():
+
+ def __init__(self, args):
+ self.args = args
+
+ self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
+ decay="linear")
+ self.epsilon = self.schedule.eval(0)
+
+ self.test_greedy = getattr(args, "test_greedy", True)
+ self.save_probs = getattr(self.args, 'save_probs', False)
+
+ def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
+ masked_policies = agent_inputs.clone()
+ masked_policies[avail_actions == 0] = 0
+ masked_policies = masked_policies / (masked_policies.sum(-1, keepdim=True) + 1e-8)
+
+ if test_mode and self.test_greedy:
+ picked_actions = masked_policies.max(dim=2)[1]
+ else:
+ self.epsilon = self.schedule.eval(t_env)
+
+ epsilon_action_num = (avail_actions.sum(-1, keepdim=True) + 1e-8)
+ masked_policies = ((1 - self.epsilon) * masked_policies
+ + avail_actions * self.epsilon / epsilon_action_num)
+ masked_policies[avail_actions == 0] = 0
+
+ picked_actions = Categorical(masked_policies).sample().long()
+
+ if self.save_probs:
+ return picked_actions, masked_policies
+ else:
+ return picked_actions
+
+
+REGISTRY["multinomial"] = MultinomialActionSelector
+
+
+def categorical_entropy(probs):
+ assert probs.size(-1) > 1
+ return Categorical(probs=probs).entropy()
+
+
+class EpsilonGreedyActionSelector():
+
+ def __init__(self, args):
+ self.args = args
+
+ self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
+ decay="linear")
+ self.epsilon = self.schedule.eval(0)
+
+ def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
+ # Assuming agent_inputs is a batch of Q-values for each agent
+ self.epsilon = self.schedule.eval(t_env)
+
+ if test_mode:
+ # Greedy action selection only
+ self.epsilon = getattr(self.args, "test_noise", 0.0)
+
+ # mask actions that are excluded from selection
+ masked_q_values = agent_inputs.clone()
+ masked_q_values[avail_actions == 0] = -float("inf") # should never be selected!
+
+ # random_numbers = th.rand_like(agent_inputs[:, :, 0]) # TODO: why do GPU and CPU model inference give different results?
+ random_numbers = th.rand(size=agent_inputs[:, :, 0].size(), dtype=th.float32, device="cpu").to(
+ agent_inputs.device)
+
+ pick_random = (random_numbers < self.epsilon).long()
+ # random_actions = Categorical(avail_actions.float()).sample().long()
+ random_actions = Categorical(avail_actions.cpu().float()).sample().long().to(avail_actions.device)
+
+ picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
+ return picked_actions
+
+
+REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
+
+
+class GaussianActionSelector():
+
+ def __init__(self, args):
+ self.args = args
+ self.test_greedy = getattr(args, "test_greedy", True)
+
+ def select_action(self, mu, sigma, test_mode=False):
+ # Expects the following input dimensions:
+ # mu: [b x a x u]
+ # sigma: [b x a x u*u] (reshaped below to [b x a x u x u])
+ assert mu.dim() == 3, "incorrect input dim: mu"
+ assert sigma.dim() == 3, "incorrect input dim: sigma"
+ sigma = sigma.view(-1, self.args.n_agents, self.args.n_actions, self.args.n_actions)
+
+ if test_mode and self.test_greedy:
+ picked_actions = mu
+ else:
+ dst = th.distributions.MultivariateNormal(mu.view(-1,
+ mu.shape[-1]),
+ sigma.view(-1,
+ mu.shape[-1],
+ mu.shape[-1]))
+ try:
+ picked_actions = dst.sample().view(*mu.shape)
+ except Exception:
+ # fall back to the mean action if sampling fails (e.g. an invalid covariance)
+ picked_actions = mu
+ return picked_actions
+
+
+REGISTRY["gaussian"] = GaussianActionSelector
diff --git a/src/components/episode_buffer.py b/src/components/episode_buffer.py
new file mode 100644
index 0000000..584d0a8
--- /dev/null
+++ b/src/components/episode_buffer.py
@@ -0,0 +1,361 @@
+import random
+from types import SimpleNamespace as SN
+
+import numpy as np
+import torch as th
+
+from .segment_tree import SumSegmentTree, MinSegmentTree
+
+
+class EpisodeBatch:
+ def __init__(self,
+ scheme,
+ groups,
+ batch_size,
+ max_seq_length,
+ data=None,
+ preprocess=None,
+ device="cpu"):
+ self.scheme = scheme.copy()
+ self.groups = groups
+ self.batch_size = batch_size
+ self.max_seq_length = max_seq_length
+ self.preprocess = {} if preprocess is None else preprocess
+ self.device = device
+
+ if data is not None:
+ self.data = data
+ else:
+ self.data = SN()
+ self.data.transition_data = {}
+ self.data.episode_data = {}
+ self._setup_data(self.scheme, self.groups, batch_size, max_seq_length, self.preprocess)
+
+ def _setup_data(self, scheme, groups, batch_size, max_seq_length, preprocess):
+ if preprocess is not None:
+ for k in preprocess:
+ assert k in scheme
+ new_k = preprocess[k][0]
+ transforms = preprocess[k][1]
+
+ vshape = self.scheme[k]["vshape"]
+ dtype = self.scheme[k]["dtype"]
+ for transform in transforms:
+ vshape, dtype = transform.infer_output_info(vshape, dtype)
+
+ self.scheme[new_k] = {
+ "vshape": vshape,
+ "dtype": dtype
+ }
+ if "group" in self.scheme[k]:
+ self.scheme[new_k]["group"] = self.scheme[k]["group"]
+ if "episode_const" in self.scheme[k]:
+ self.scheme[new_k]["episode_const"] = self.scheme[k]["episode_const"]
+
+ assert "filled" not in scheme, '"filled" is a reserved key for masking.'
+ scheme.update({
+ "filled": {"vshape": (1,), "dtype": th.long},
+ })
+
+ for field_key, field_info in scheme.items():
+ assert "vshape" in field_info, "Scheme must define vshape for {}".format(field_key)
+ vshape = field_info["vshape"]
+ episode_const = field_info.get("episode_const", False)
+ group = field_info.get("group", None)
+ dtype = field_info.get("dtype", th.float32)
+
+ if isinstance(vshape, int):
+ vshape = (vshape,)
+
+ if group:
+ assert group in groups, "Group {} must have its number of members defined in _groups_".format(group)
+ shape = (groups[group], *vshape)
+ else:
+ shape = vshape
+
+ if episode_const:
+ self.data.episode_data[field_key] = th.zeros((batch_size, *shape), dtype=dtype, device=self.device)
+ else:
+ self.data.transition_data[field_key] = th.zeros((batch_size, max_seq_length, *shape), dtype=dtype,
+ device=self.device)
+
+ def extend(self, scheme, groups=None):
+ raise NotImplementedError
+ self._setup_data(scheme, self.groups if groups is None else groups, self.batch_size, self.max_seq_length)
+
+ def to(self, device):
+ for k, v in self.data.transition_data.items():
+ self.data.transition_data[k] = v.to(device)
+ for k, v in self.data.episode_data.items():
+ self.data.episode_data[k] = v.to(device)
+ self.device = device
+
+ def update(self, data, bs=slice(None), ts=slice(None), mark_filled=True):
+ slices = self._parse_slices((bs, ts))
+ for k, v in data.items():
+ if k in self.data.transition_data:
+ target = self.data.transition_data
+ if mark_filled:
+ target["filled"][slices] = 1
+ mark_filled = False
+ _slices = slices
+ elif k in self.data.episode_data:
+ target = self.data.episode_data
+ _slices = slices[0]
+ else:
+ raise KeyError("{} not found in transition or episode data".format(k))
+
+ dtype = self.scheme[k].get("dtype", th.float32)
+ v = th.tensor(v, dtype=dtype, device=self.device)
+ self._check_safe_view(k, v, target[k][_slices])
+ target[k][_slices] = v.view_as(target[k][_slices])
+
+ if k in self.preprocess:
+ new_k = self.preprocess[k][0]
+ v = target[k][_slices]
+ for transform in self.preprocess[k][1]:
+ v = transform.transform(v)
+ target[new_k][_slices] = v.view_as(target[new_k][_slices])
+
+ def _check_safe_view(self, k, v, dest):
+ idx = len(v.shape) - 1
+ for s in dest.shape[::-1]:
+ if v.shape[idx] != s:
+ if s != 1:
+ raise ValueError("{}: unsafe reshape of {} to {}".format(k, v.shape, dest.shape))
+ else:
+ idx -= 1
+
+ def __getitem__(self, item):
+ if isinstance(item, str):
+ if item in self.data.episode_data:
+ return self.data.episode_data[item]
+ elif item in self.data.transition_data:
+ return self.data.transition_data[item]
+ else:
+ raise ValueError
+ elif isinstance(item, tuple) and all([isinstance(it, str) for it in item]):
+ new_data = self._new_data_sn()
+ for key in item:
+ if key in self.data.transition_data:
+ new_data.transition_data[key] = self.data.transition_data[key]
+ elif key in self.data.episode_data:
+ new_data.episode_data[key] = self.data.episode_data[key]
+ else:
+ raise KeyError("Unrecognised key {}".format(key))
+
+ # Update the scheme to only have the requested keys
+ new_scheme = {key: self.scheme[key] for key in item}
+ new_groups = {self.scheme[key]["group"]: self.groups[self.scheme[key]["group"]]
+ for key in item if "group" in self.scheme[key]}
+ ret = EpisodeBatch(new_scheme, new_groups, self.batch_size, self.max_seq_length, data=new_data,
+ device=self.device)
+ return ret
+ else:
+ item = self._parse_slices(item)
+ new_data = self._new_data_sn()
+ for k, v in self.data.transition_data.items():
+ new_data.transition_data[k] = v[item]
+ for k, v in self.data.episode_data.items():
+ new_data.episode_data[k] = v[item[0]]
+
+ ret_bs = self._get_num_items(item[0], self.batch_size)
+ ret_max_t = self._get_num_items(item[1], self.max_seq_length)
+
+ ret = EpisodeBatch(self.scheme, self.groups, ret_bs, ret_max_t, data=new_data, device=self.device)
+ return ret
+
+ def _get_num_items(self, indexing_item, max_size):
+ if isinstance(indexing_item, list) or isinstance(indexing_item, np.ndarray):
+ return len(indexing_item)
+ elif isinstance(indexing_item, slice):
+ _range = indexing_item.indices(max_size)
+ return 1 + (_range[1] - _range[0] - 1) // _range[2]
+
+ def _new_data_sn(self):
+ new_data = SN()
+ new_data.transition_data = {}
+ new_data.episode_data = {}
+ return new_data
+
+ def _parse_slices(self, items):
+ parsed = []
+ # Only batch slice given, add full time slice
+ if (isinstance(items, slice) # slice a:b
+ or isinstance(items, int) # int i
+ or (isinstance(items, (list, np.ndarray, th.LongTensor, th.cuda.LongTensor))) # [a,b,c]
+ ):
+ items = (items, slice(None))
+
+ # Need the time indexing to be contiguous
+ if isinstance(items[1], list):
+ raise IndexError("Indexing across Time must be contiguous")
+
+ for item in items:
+ # TODO: stronger checks to ensure only supported options get through
+ if isinstance(item, int):
+ # Convert single indices to slices
+ parsed.append(slice(item, item + 1))
+ else:
+ # Leave slices and lists as is
+ parsed.append(item)
+ return parsed
+
+ def max_t_filled(self):
+ return th.sum(self.data.transition_data["filled"], 1).max(0)[0]
+
+ def __repr__(self):
+ return "EpisodeBatch. Batch Size:{} Max_seq_len:{} Keys:{} Groups:{}".format(self.batch_size,
+ self.max_seq_length,
+ self.scheme.keys(),
+ self.groups.keys())
+
+ def split(self, split_num):
+ if split_num == 1:
+ return [self]
+ split_data = []
+ batch_size = self.batch_size // split_num
+ for i in range(split_num):
+ start_idx = i * batch_size
+ split_data.append(self[start_idx: start_idx + batch_size if i != split_num - 1 else self.batch_size])
+ return split_data
+
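+# Minimal construction sketch (field names are illustrative, not the full SMAC scheme):
+#
+# scheme = {
+# "state": {"vshape": 10},
+# "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
+# "reward": {"vshape": (1,)},
+# }
+# groups = {"agents": 3}
+# batch = EpisodeBatch(scheme, groups, batch_size=2, max_seq_length=5)
+# batch.update({"reward": [[1.0]]}, bs=0, ts=0)
+# batch["reward"].shape # -> torch.Size([2, 5, 1])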
+
+class ReplayBuffer(EpisodeBatch):
+ def __init__(self, scheme, groups, buffer_size, max_seq_length, preprocess=None, device="cpu"):
+ super(ReplayBuffer, self).__init__(scheme, groups, buffer_size, max_seq_length, preprocess=preprocess,
+ device=device)
+ self.buffer_size = buffer_size # same as self.batch_size but more explicit
+ self.buffer_index = 0
+ self.episodes_in_buffer = 0
+
+ def insert_episode_batch(self, ep_batch):
+ if self.buffer_index + ep_batch.batch_size <= self.buffer_size:
+ self.update(ep_batch.data.transition_data,
+ slice(self.buffer_index, self.buffer_index + ep_batch.batch_size),
+ slice(0, ep_batch.max_seq_length),
+ mark_filled=False)
+ self.update(ep_batch.data.episode_data,
+ slice(self.buffer_index, self.buffer_index + ep_batch.batch_size))
+ self.buffer_index = (self.buffer_index + ep_batch.batch_size)
+ self.episodes_in_buffer = max(self.episodes_in_buffer, self.buffer_index)
+ self.buffer_index = self.buffer_index % self.buffer_size
+ assert self.buffer_index < self.buffer_size
+ else:
+ buffer_left = self.buffer_size - self.buffer_index
+ self.insert_episode_batch(ep_batch[0:buffer_left, :])
+ self.insert_episode_batch(ep_batch[buffer_left:, :])
+
+ def can_sample(self, batch_size):
+ return self.episodes_in_buffer >= batch_size
+
+ def sample(self, batch_size):
+ assert self.can_sample(batch_size)
+ if self.episodes_in_buffer == batch_size:
+ return self[:batch_size]
+ else:
+ # Uniform sampling only atm
+ ep_ids = np.random.choice(self.episodes_in_buffer, batch_size, replace=False)
+ return self[ep_ids]
+
+ def uni_sample(self, batch_size):
+ return self.sample(batch_size)
+
+ def sample_latest(self, batch_size):
+ assert self.can_sample(batch_size)
+ if self.buffer_index - batch_size < 0:
+ # Uniform sampling
+ return self.uni_sample(batch_size)
+ else:
+ # Return the latest
+ return self[self.buffer_index - batch_size: self.buffer_index]
+
+ def __repr__(self):
+ return "ReplayBuffer. {}/{} episodes. Keys:{} Groups:{}".format(self.episodes_in_buffer,
+ self.buffer_size,
+ self.scheme.keys(),
+ self.groups.keys())
+
+
+# Adapted from the OpenAI Baseline implementations (https://github.com/openai/baselines)
+class PrioritizedReplayBuffer(ReplayBuffer):
+ def __init__(self, scheme, groups, buffer_size, max_seq_length, alpha, beta, t_max, preprocess=None, device="cpu"):
+ super(PrioritizedReplayBuffer, self).__init__(scheme, groups, buffer_size, max_seq_length,
+ preprocess=preprocess, device="cpu")
+ self.alpha = alpha
+ self.beta_original = beta
+ self.beta = beta
+ self.beta_increment = (1.0 - beta) / t_max
+ self.max_priority = 1.0
+
+ it_capacity = 1
+ while it_capacity < buffer_size:
+ it_capacity *= 2
+
+ self._it_sum = SumSegmentTree(it_capacity)
+ self._it_min = MinSegmentTree(it_capacity)
+
+ def insert_episode_batch(self, ep_batch):
+ # TODO: convert batch/episode to idx?
+ pre_idx = self.buffer_index
+ super().insert_episode_batch(ep_batch)
+ idx = self.buffer_index
+ if idx >= pre_idx:
+ for i in range(idx - pre_idx):
+ self._it_sum[pre_idx + i] = self.max_priority ** self.alpha
+ self._it_min[pre_idx + i] = self.max_priority ** self.alpha
+ else:
+ for i in range(self.buffer_size - pre_idx):
+ self._it_sum[pre_idx + i] = self.max_priority ** self.alpha
+ self._it_min[pre_idx + i] = self.max_priority ** self.alpha
+ for i in range(self.buffer_index):
+ self._it_sum[i] = self.max_priority ** self.alpha
+ self._it_min[i] = self.max_priority ** self.alpha
+
+ def _sample_proportional(self, batch_size):
+ res = []
+ p_total = self._it_sum.sum(0, self.episodes_in_buffer - 1)
+ every_range_len = p_total / batch_size
+ for i in range(batch_size):
+ mass = random.random() * every_range_len + i * every_range_len
+ idx = self._it_sum.find_prefixsum_idx(mass)
+ res.append(idx)
+ return res
+
+ def sample(self, batch_size, t):
+ assert self.can_sample(batch_size)
+ self.beta = self.beta_original + (t * self.beta_increment)
+
+ idxes = self._sample_proportional(batch_size)
+ weights = []
+ p_min = self._it_min.min() / self._it_sum.sum()
+ max_weight = (p_min * self.episodes_in_buffer) ** (-self.beta)
+
+ for idx in idxes:
+ p_sample = self._it_sum[idx] / self._it_sum.sum()
+ weight = (p_sample * self.episodes_in_buffer) ** (-self.beta)
+ weights.append(weight / max_weight)
+ weights = np.array(weights)
+
+ return self[idxes], idxes, weights
+
+ def update_priorities(self, idxes, priorities):
+ """Update priorities of sampled transitions.
+ sets priority of transition at index idxes[i] in buffer
+ to priorities[i].
+ Parameters
+ ----------
+ idxes: [int]
+ List of idxes of sampled transitions
+ priorities: [float]
+ List of updated priorities corresponding to
+ transitions at the sampled idxes denoted by
+ variable `idxes`.
+ """
+ assert len(idxes) == len(priorities)
+ for idx, priority in zip(idxes, priorities):
+ assert priority > 0
+ assert 0 <= idx < self.episodes_in_buffer
+ self._it_sum[idx] = priority ** self.alpha
+ self._it_min[idx] = priority ** self.alpha
+ self.max_priority = max(self.max_priority, priority)
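+
+# Rough PER loop sketch (names are illustrative; the actual learner wiring lives elsewhere):
+#
+# batch, idxes, weights = per_buffer.sample(batch_size, t_env) # importance weights in (0, 1]
+# td_errors = learner.train(batch, weights) # weights scale the per-episode loss
+# per_buffer.update_priorities(idxes, (abs(td_errors) + 1e-6))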
diff --git a/src/components/epsilon_schedules.py b/src/components/epsilon_schedules.py
new file mode 100644
index 0000000..eb670e6
--- /dev/null
+++ b/src/components/epsilon_schedules.py
@@ -0,0 +1,40 @@
+import numpy as np
+
+
+class DecayThenFlatSchedule():
+
+ def __init__(self,
+ start,
+ finish,
+ time_length,
+ decay="exp"):
+
+ self.start = start
+ self.finish = finish
+ self.time_length = time_length
+ self.delta = (self.start - self.finish) / self.time_length
+ self.decay = decay
+
+ if self.decay in ["exp"]:
+ self.exp_scaling = (-1) * self.time_length / np.log(self.finish) if self.finish > 0 else 1
+
+ def eval(self, T):
+ if self.decay in ["linear"]:
+ return max(self.finish, self.start - self.delta * T)
+ elif self.decay in ["exp"]:
+ return min(self.start, max(self.finish, np.exp(- T / self.exp_scaling)))
+
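+# Usage sketch with the epsilon values used in the alg configs (linear decay):
+# schedule = DecayThenFlatSchedule(start=1.0, finish=0.05, time_length=100000, decay="linear")
+# schedule.eval(0) # -> 1.0
+# schedule.eval(50000) # -> 0.525
+# schedule.eval(100000) # -> 0.05, and it stays flat afterwards
+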
+
+class LinearIncreaseSchedule():
+
+ def __init__(self,
+ start,
+ finish,
+ time_length):
+ self.start = start
+ self.finish = finish
+ self.time_length = time_length
+ self.delta = (self.start - self.finish) / self.time_length
+
+ def eval(self, T):
+ return min(self.finish, self.start - self.delta * T)
diff --git a/src/components/segment_tree.py b/src/components/segment_tree.py
new file mode 100644
index 0000000..263805f
--- /dev/null
+++ b/src/components/segment_tree.py
@@ -0,0 +1,136 @@
+import operator
+
+
+# Directly from OpenAI Baseline implementation (https://github.com/openai/baselines)
+class SegmentTree(object):
+ def __init__(self, capacity, operation, neutral_element):
+ """Build a Segment Tree data structure.
+ https://en.wikipedia.org/wiki/Segment_tree
+ Can be used as regular array, but with two
+ important differences:
+ a) setting item's value is slightly slower.
+ It is O(lg capacity) instead of O(1).
+ b) user has access to an efficient ( O(log segment size) )
+ `reduce` operation which reduces `operation` over
+ a contiguous subsequence of items in the array.
+ Parameters
+ ----------
+ capacity: int
+ Total size of the array - must be a power of two.
+ operation: lambda obj, obj -> obj
+ an operation for combining elements (e.g. sum, max)
+ must form a mathematical group together with the set of
+ possible values for array elements (i.e. be associative)
+ neutral_element: obj
+ neutral element for the operation above. e.g. float('-inf')
+ for max and 0 for sum.
+ """
+ assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
+ self._capacity = capacity
+ self._value = [neutral_element for _ in range(2 * capacity)]
+ self._operation = operation
+
+ def _reduce_helper(self, start, end, node, node_start, node_end):
+ if start == node_start and end == node_end:
+ return self._value[node]
+ mid = (node_start + node_end) // 2
+ if end <= mid:
+ return self._reduce_helper(start, end, 2 * node, node_start, mid)
+ else:
+ if mid + 1 <= start:
+ return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
+ else:
+ return self._operation(
+ self._reduce_helper(start, mid, 2 * node, node_start, mid),
+ self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
+ )
+
+ def reduce(self, start=0, end=None):
+ """Returns result of applying `self.operation`
+ to a contiguous subsequence of the array.
+ self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
+ Parameters
+ ----------
+ start: int
+ beginning of the subsequence
+ end: int
+ end of the subsequence
+ Returns
+ -------
+ reduced: obj
+ result of reducing self.operation over the specified range of array elements.
+ """
+ if end is None:
+ end = self._capacity
+ if end < 0:
+ end += self._capacity
+ end -= 1
+ return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
+
+ def __setitem__(self, idx, val):
+ # index of the leaf
+ idx += self._capacity
+ self._value[idx] = val
+ idx //= 2
+ while idx >= 1:
+ self._value[idx] = self._operation(
+ self._value[2 * idx],
+ self._value[2 * idx + 1]
+ )
+ idx //= 2
+
+ def __getitem__(self, idx):
+ assert 0 <= idx < self._capacity
+ return self._value[self._capacity + idx]
+
+
+class SumSegmentTree(SegmentTree):
+ def __init__(self, capacity):
+ super(SumSegmentTree, self).__init__(
+ capacity=capacity,
+ operation=operator.add,
+ neutral_element=0.0
+ )
+
+ def sum(self, start=0, end=None):
+ """Returns arr[start] + ... + arr[end]"""
+ return super(SumSegmentTree, self).reduce(start, end)
+
+ def find_prefixsum_idx(self, prefixsum):
+ """Find the highest index `i` in the array such that
+ arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum
+ If array values are probabilities, this function
+ allows sampling indices according to the discrete
+ probability distribution efficiently.
+ Parameters
+ ----------
+ prefixsum: float
+ upper bound on the sum of array prefix
+ Returns
+ -------
+ idx: int
+ highest index satisfying the prefixsum constraint
+ """
+ assert 0 <= prefixsum <= self.sum() + 1e-5
+ idx = 1
+ while idx < self._capacity: # while non-leaf
+ if self._value[2 * idx] > prefixsum:
+ idx = 2 * idx
+ else:
+ prefixsum -= self._value[2 * idx]
+ idx = 2 * idx + 1
+ return idx - self._capacity
+
+
+class MinSegmentTree(SegmentTree):
+ def __init__(self, capacity):
+ super(MinSegmentTree, self).__init__(
+ capacity=capacity,
+ operation=min,
+ neutral_element=float('inf')
+ )
+
+ def min(self, start=0, end=None):
+ """Returns min(arr[start], ..., arr[end])"""
+
+ return super(MinSegmentTree, self).reduce(start, end)
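+
+# Minimal usage sketch (illustrative only): priorities are written with __setitem__ and
+# sampled via the prefix-sum search, as PrioritizedReplayBuffer does in episode_buffer.py.
+#
+# tree = SumSegmentTree(8) # capacity must be a power of two
+# tree[0], tree[1], tree[2] = 1.0, 2.0, 3.0
+# tree.sum() # -> 6.0
+# tree.find_prefixsum_idx(2.5) # -> 1 (the mass 2.5 falls inside the slot of arr[1])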
diff --git a/src/components/transforms.py b/src/components/transforms.py
new file mode 100644
index 0000000..98d72f6
--- /dev/null
+++ b/src/components/transforms.py
@@ -0,0 +1,22 @@
+import torch as th
+
+
+class Transform:
+ def transform(self, tensor):
+ raise NotImplementedError
+
+ def infer_output_info(self, vshape_in, dtype_in):
+ raise NotImplementedError
+
+
+class OneHot(Transform):
+ def __init__(self, out_dim):
+ self.out_dim = out_dim
+
+ def transform(self, tensor):
+ y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
+ y_onehot.scatter_(-1, tensor.long(), 1)
+ return y_onehot.float()
+
+ def infer_output_info(self, vshape_in, dtype_in):
+ return (self.out_dim,), th.float32
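+
+# Usage sketch (shapes are illustrative): OneHot is used as a preprocess step in the
+# EpisodeBatch scheme, expanding an integer action index into a one-hot vector.
+#
+# onehot = OneHot(out_dim=6)
+# actions = th.tensor([[[2]], [[5]]]) # [batch=2, t=1, 1]
+# onehot.transform(actions).shape # -> torch.Size([2, 1, 6])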
diff --git a/src/config/algs/asn.yaml b/src/config/algs/asn.yaml
new file mode 100644
index 0000000..0743d3a
--- /dev/null
+++ b/src/config/algs/asn.yaml
@@ -0,0 +1,35 @@
+# --- ASN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "asn_rnn"
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+#rnn_hidden_dim: 128 # Size of hidden state for default rnn agent
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "asn"
diff --git a/src/config/algs/deepset_hyper_qmix.yaml b/src/config/algs/deepset_hyper_qmix.yaml
new file mode 100644
index 0000000..e474f4a
--- /dev/null
+++ b/src/config/algs/deepset_hyper_qmix.yaml
@@ -0,0 +1,38 @@
+# --- Deepset-Hyper QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "deepset_hyper_rnn"
+hpn_hyper_dim: 64
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "deepset_hyper_qmix"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/deepset_hyper_vdn.yaml b/src/config/algs/deepset_hyper_vdn.yaml
new file mode 100644
index 0000000..fbc227b
--- /dev/null
+++ b/src/config/algs/deepset_hyper_vdn.yaml
@@ -0,0 +1,38 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "deepset_hyper_rnn"
+hpn_hyper_dim: 64
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "deepset_hyper_vdn"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/deepset_qmix.yaml b/src/config/algs/deepset_qmix.yaml
new file mode 100644
index 0000000..e9a2a53
--- /dev/null
+++ b/src/config/algs/deepset_qmix.yaml
@@ -0,0 +1,38 @@
+# --- Deepset QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "deepset_rnn"
+hpn_hyper_dim: 64
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "deepset_qmix"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/deepset_vdn.yaml b/src/config/algs/deepset_vdn.yaml
new file mode 100644
index 0000000..27d5154
--- /dev/null
+++ b/src/config/algs/deepset_vdn.yaml
@@ -0,0 +1,38 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "deepset_rnn"
+hpn_hyper_dim: 64
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "deepset_vdn"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/feudal.yaml b/src/config/algs/feudal.yaml
new file mode 100644
index 0000000..7d7732e
--- /dev/null
+++ b/src/config/algs/feudal.yaml
@@ -0,0 +1,49 @@
+# --- QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000
+
+runner: "episode"
+batch_size_run: 4
+buffer_size: 5000
+batch_size: 128
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+mac: "feudal_mac" # Basic controller
+
+# use the Q_Learner to train
+agent: "feudal"
+# manager
+c: 1 # 10 step
+manager_hidden_dim: 64
+state_dim: 92
+goal_dim: 16
+
+# critic
+critic_lr: 0.0005 #
+vf_coef: 0.5 #
+critic_hidden_dim: 64 #
+
+
+# worker
+worker_hidden_dim: 64
+embedding_dim: 16
+
+
+agent_output_type: "q"
+learner: "feudal_learner"
+double_q: True
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_layers: 2
+hypernet_embed: 64
+td_lambda: 0.6
+
+intrinsic_rewards_alpha: 0.001
+
+name: "feudal"
diff --git a/src/config/algs/gnn_qmix.yaml b/src/config/algs/gnn_qmix.yaml
new file mode 100644
index 0000000..c51e919
--- /dev/null
+++ b/src/config/algs/gnn_qmix.yaml
@@ -0,0 +1,37 @@
+# --- GNN QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "gnn_rnn"
+gnn_layer_num: 1
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "gnn_qmix"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/gnn_vdn.yaml b/src/config/algs/gnn_vdn.yaml
new file mode 100644
index 0000000..dbdc316
--- /dev/null
+++ b/src/config/algs/gnn_vdn.yaml
@@ -0,0 +1,37 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+agent: "gnn_rnn"
+gnn_layer_num: 1
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "gnn_vdn"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/hpn_qmix.yaml b/src/config/algs/hpn_qmix.yaml
new file mode 100644
index 0000000..b4d0e29
--- /dev/null
+++ b/src/config/algs/hpn_qmix.yaml
@@ -0,0 +1,43 @@
+# --- QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8 # batch_size_run=4, buffer_size = 2500, batch_size=64 for 3s5z_vs_3s6z
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+#agent: "hpn_rnn"
+agent: "hpns_rnn"
+
+hpn_hyper_dim: 64
+hpn_hyper_activation: 'relu'
+
+hpn_head_num: 1 # 2 for 3s_vs_5z and 6h_vs_8z
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6 # 0.3 for 6h_vs_8z
+optimizer: 'adam'
+q_lambda: False
+
+
+name: "hpn_qmix"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/hpn_qplex.yaml b/src/config/algs/hpn_qplex.yaml
new file mode 100644
index 0000000..327386b
--- /dev/null
+++ b/src/config/algs/hpn_qplex.yaml
@@ -0,0 +1,47 @@
+# From https://github.com/wjh720/QPLEX/
+# --- Qatten specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+# update the target network every {} episodes
+target_update_interval: 200
+t_max: 10050000
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+#agent: "hpn_rnn"
+agent: "hpns_rnn"
+hpn_head_num: 1
+
+hpn_hyper_dim: 64
+hpn_hyper_activation: 'relu'
+
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+agent_output_type: "q"
+
+learner: "dmaq_qatten_learner"
+double_q: True
+mixer: "dmaq"
+mixing_embed_dim: 32
+hypernet_embed: 64
+adv_hypernet_layers: 2
+adv_hypernet_embed: 64
+td_lambda: 0.6
+lr: 0.001
+
+num_kernel: 4
+is_minus_one: True
+weighted_head: True
+is_adv_attention: True
+is_stop_gradient: True
+
+name: "hpn_qplex"
diff --git a/src/config/algs/hpn_vdn.yaml b/src/config/algs/hpn_vdn.yaml
new file mode 100644
index 0000000..457fa1a
--- /dev/null
+++ b/src/config/algs/hpn_vdn.yaml
@@ -0,0 +1,43 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "hpn_mac"
+#agent: "hpn_rnn"
+agent: "hpns_rnn"
+
+hpn_hyper_dim: 64
+hpn_hyper_activation: 'relu'
+hpn_head_num: 1 # 2 for 3s_vs_5z and 6h_vs_8z
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+
+
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "hpn_vdn"
+
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: False # Include the agent's last action (one_hot) in the observation
\ No newline at end of file
diff --git a/src/config/algs/qmix.yaml b/src/config/algs/qmix.yaml
new file mode 100644
index 0000000..8bf5742
--- /dev/null
+++ b/src/config/algs/qmix.yaml
@@ -0,0 +1,47 @@
+# --- QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8 # batch_size_run=4, buffer_size = 2500, batch_size=64 for 3s5z_vs_3s6z
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "n_rnn"
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6 # 0.3 for 6h_vs_8z
+optimizer: 'adam'
+q_lambda: False
+
+# rnn layer normalization
+use_layer_norm: False
+
+# orthogonal init for DNN
+use_orthogonal: False
+gain: 0.01
+
+# Priority experience replay
+use_per: False
+per_alpha: 0.6
+per_beta: 0.4
+return_priority: False
+
+#name: "qmix_env=8_adam_td_lambda"
+name: "qmix"
\ No newline at end of file
diff --git a/src/config/algs/qmix_DA.yaml b/src/config/algs/qmix_DA.yaml
new file mode 100644
index 0000000..a065f6d
--- /dev/null
+++ b/src/config/algs/qmix_DA.yaml
@@ -0,0 +1,51 @@
+# --- QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8 # batch_size_run=4, buffer_size = 2500, batch_size=64 for 3s5z_vs_3s6z
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "n_rnn"
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+agent_output_type: q
+
+learner: "q_learner_data_augmentation"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6 # 0.3 for 6h_vs_8z
+optimizer: 'adam'
+q_lambda: False
+
+# rnn layer normalization
+use_layer_norm: False
+
+# orthogonal init for DNN
+use_orthogonal: False
+gain: 0.01
+
+# Priority experience replay
+use_per: False
+per_alpha: 0.6
+per_beta: 0.4
+return_priority: False
+
+#name: "qmix_env=8_adam_td_lambda"
+name: "qmix_DA"
+
+enable_data_augmentation: True
+augment_times: 3
\ No newline at end of file
diff --git a/src/config/algs/qplex.yaml b/src/config/algs/qplex.yaml
new file mode 100644
index 0000000..6054291
--- /dev/null
+++ b/src/config/algs/qplex.yaml
@@ -0,0 +1,41 @@
+# From https://github.com/wjh720/QPLEX/
+# --- Qatten specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+# update the target network every {} episodes
+target_update_interval: 200
+t_max: 10050000
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "n_rnn"
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+agent_output_type: "q"
+
+learner: "dmaq_qatten_learner"
+double_q: True
+mixer: "dmaq"
+mixing_embed_dim: 32
+hypernet_embed: 64
+adv_hypernet_layers: 2
+adv_hypernet_embed: 64
+td_lambda: 0.6
+lr: 0.001
+
+num_kernel: 4
+is_minus_one: True
+weighted_head: True
+is_adv_attention: True
+is_stop_gradient: True
+
+name: "qplex"
diff --git a/src/config/algs/updet_qmix.yaml b/src/config/algs/updet_qmix.yaml
new file mode 100644
index 0000000..7b42799
--- /dev/null
+++ b/src/config/algs/updet_qmix.yaml
@@ -0,0 +1,38 @@
+# --- QMIX specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "updet_mac"
+agent: "updet_agent"
+
+agent_output_type: q
+# %%%%%%%%%%%%% Transformer Settings %%%%%%%%%%%
+transformer_embed_dim: 32
+transformer_heads: 3 # head number of transformer
+transformer_depth: 2 # block number of transformer
+
+learner: "nq_learner"
+mixer: "qmix"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "updet_qmix"
\ No newline at end of file
diff --git a/src/config/algs/updet_vdn.yaml b/src/config/algs/updet_vdn.yaml
new file mode 100644
index 0000000..1e8e14b
--- /dev/null
+++ b/src/config/algs/updet_vdn.yaml
@@ -0,0 +1,38 @@
+# --- UPDeT-VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "updet_mac"
+agent: "updet_agent"
+
+agent_output_type: q
+# %%%%%%%%%%%%% Transformer Settings %%%%%%%%%%%
+transformer_embed_dim: 32
+transformer_heads: 3 # head number of transformer
+transformer_depth: 2 # block number of transformer
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+name: "updet_vdn"
\ No newline at end of file
diff --git a/src/config/algs/vdn.yaml b/src/config/algs/vdn.yaml
new file mode 100644
index 0000000..e2fed29
--- /dev/null
+++ b/src/config/algs/vdn.yaml
@@ -0,0 +1,36 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "n_rnn"
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+#rnn_hidden_dim: 128 # Size of hidden state for default rnn agent
+agent_output_type: q
+
+learner: "nq_learner"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "vdn"
diff --git a/src/config/algs/vdn_DA.yaml b/src/config/algs/vdn_DA.yaml
new file mode 100644
index 0000000..ad02ddc
--- /dev/null
+++ b/src/config/algs/vdn_DA.yaml
@@ -0,0 +1,38 @@
+# --- VDN specific parameters ---
+
+# use epsilon greedy action selector
+action_selector: "epsilon_greedy"
+epsilon_start: 1.0
+epsilon_finish: 0.05
+epsilon_anneal_time: 100000 # 500000 for 6h_vs_8z
+
+runner: "parallel"
+batch_size_run: 8
+buffer_size: 5000
+batch_size: 128
+
+t_max: 10050000
+
+# update the target network every {} episodes
+target_update_interval: 200
+
+# use the Q_Learner to train
+mac: "n_mac"
+agent: "n_rnn"
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+agent_output_type: q
+
+learner: "q_learner_data_augmentation"
+mixer: "vdn"
+mixing_embed_dim: 32
+hypernet_embed: 64
+lr: 0.001 # Learning rate for agents
+td_lambda: 0.6
+optimizer: 'adam'
+q_lambda: False
+
+#name: "vdn_env=8_adam_td_lambda"
+name: "vdn_DA"
+
+enable_data_augmentation: True
+augment_times: 3
diff --git a/src/config/default.yaml b/src/config/default.yaml
new file mode 100644
index 0000000..8f22912
--- /dev/null
+++ b/src/config/default.yaml
@@ -0,0 +1,57 @@
+# --- Defaults ---
+
+# --- pymarl options ---
+runner: "episode" # Runs 1 env for an episode
+mac: "basic_mac" # Basic controller
+env: "sc2" # Environment name
+env_args: {} # Arguments for the environment
+batch_size_run: 1 # Number of environments to run in parallel
+test_nepisode: 20 # Number of episodes to test for
+test_interval: 2000 # Test after {} timesteps have passed
+test_greedy: True # Use greedy evaluation (if False, will set epsilon floor to 0)
+log_interval: 2000 # Log summary of stats after every {} timesteps
+runner_log_interval: 2000 # Log runner stats (not test stats) every {} timesteps
+learner_log_interval: 2000 # Log training stats every {} timesteps
+t_max: 10000 # Stop running after this many timesteps
+use_cuda: True # Use gpu by default unless it isn't available
+buffer_cpu_only: True # If True, keep the replay buffer in CPU memory rather than in VRAM
+
+# --- Logging options ---
+use_tensorboard: True # Log results to tensorboard
+save_model: True # Save the models to disk
+save_model_interval: 2000000 # Save models after this many timesteps
+checkpoint_path: "" # Load a checkpoint from this path
+evaluate: False # Evaluate model for test_nepisode episodes and quit (no training)
+load_step: 0 # Load model trained on this many timesteps (0 if choose max possible)
+save_replay: False # Saving the replay of the model loaded from checkpoint_path
+local_results_path: "results" # Path for local results
+
+# --- RL hyperparameters ---
+gamma: 0.99
+batch_size: 32 # Number of episodes to train on
+buffer_size: 32 # Size of the replay buffer
+lr: 0.0005 # Learning rate for agents
+critic_lr: 0.0005 # Learning rate for critics
+optim_alpha: 0.99 # RMSProp alpha
+optim_eps: 0.00001 # RMSProp epsilon
+grad_norm_clip: 10 # Reduce magnitude of gradients above this L2 norm
+double_q: True
+
+# --- Agent parameters ---
+agent: "rnn" # Default rnn agent
+rnn_hidden_dim: 64 # Size of hidden state for default rnn agent
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: True # Include the agent's last action (one_hot) in the observation
+
+# --- Experiment running params ---
+repeat_id: 1
+label: "default_label"
+
+run: "default"
+
+thread_num: 4 # 12
+enable_parallel_computing: False
+
+cpu_inference: True
+#cpu_inference: False
+asn_hidden_size: 32 # 64
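
These defaults are the base layer of the configuration; the env YAML (e.g. `sc2.yaml`) and the algorithm YAML (e.g. `vdn.yaml`) override them, which is why `lr`, `t_max`, `batch_size`, and the logging intervals appear in several files with different values. A hedged sketch of the usual PyMARL-style recursive merge (the repo's actual helper may differ in name and details):

```python
import copy

def recursive_dict_update(base: dict, overrides: dict) -> dict:
    """Return a copy of `base` with `overrides` applied, recursing into nested dicts."""
    merged = copy.deepcopy(base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = recursive_dict_update(merged[key], value)
        else:
            merged[key] = value
    return merged

defaults = {"lr": 0.0005, "t_max": 10000, "env_args": {}}
env_cfg = {"t_max": 2050000, "env_args": {"map_name": "3m"}}   # e.g. from sc2.yaml
alg_cfg = {"lr": 0.001}                                        # e.g. from vdn.yaml
config = recursive_dict_update(recursive_dict_update(defaults, env_cfg), alg_cfg)
# config == {"lr": 0.001, "t_max": 2050000, "env_args": {"map_name": "3m"}}
```
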
diff --git a/src/config/envs/sc2.yaml b/src/config/envs/sc2.yaml
new file mode 100644
index 0000000..47c754a
--- /dev/null
+++ b/src/config/envs/sc2.yaml
@@ -0,0 +1,42 @@
+env: sc2
+
+env_args:
+ continuing_episode: False
+ difficulty: "7"
+ game_version: null
+ map_name: "3m"
+ move_amount: 2
+ obs_all_health: True
+ obs_instead_of_state: False
+ obs_last_action: False
+ obs_own_health: True
+ obs_pathing_grid: False
+ obs_terrain_height: False
+ obs_timestep_number: False
+ reward_death_value: 10
+ reward_defeat: 0
+ reward_negative_scale: 0.5
+ reward_only_positive: True
+ reward_scale: True
+ reward_scale_rate: 20
+ reward_sparse: False
+ reward_win: 200
+ replay_dir: ""
+ replay_prefix: ""
+ state_last_action: True
+ state_timestep_number: False
+ step_mul: 8
+ seed: null
+ heuristic_ai: False
+ heuristic_rest: False
+ debug: False
+
+test_greedy: True
+test_nepisode: 32
+test_interval: 10000
+log_interval: 10000
+runner_log_interval: 10000
+learner_log_interval: 10000
+t_max: 2050000
+obs_agent_id: True # Include the agent's one_hot id in the observation
+obs_last_action: True # Include the agent's last action (one_hot) in the observation
diff --git a/src/config/envs/sc2_v2_protoss.yaml b/src/config/envs/sc2_v2_protoss.yaml
new file mode 100644
index 0000000..0480bf5
--- /dev/null
+++ b/src/config/envs/sc2_v2_protoss.yaml
@@ -0,0 +1,76 @@
+env: sc2_v2
+
+env_args:
+ change_fov_with_move: False # if True, the full field-of-view is split into 4 90-degree sectors (instead of 12 30-degree sectors), each corresponding to one of the move actions (north, south, east, west).
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+ capability_config:
+ n_units: 5
+ team_gen:
+ dist_type: "weighted_teams"
+ unit_types:
+ - "stalker"
+ - "zealot"
+ - "colossus"
+ weights:
+ - 0.45
+ - 0.45
+ - 0.1
+ exception_unit_types:
+ - "colossus"
+ observe: True
+
+ start_positions:
+ dist_type: "surrounded_and_reflect"
+ p: 0.5
+ n_enemies: 5
+ map_x: 32
+ map_y: 32
+
+ map_name: "10gen_protoss"
+ obs_own_pos: True
+ obs_starcraft: True
+ # conic_fov: True
+ # Since our goal is not to design more efficient exploration algorithms, we keep the agents' field-of-view and attack range as a full circle, as in SMAC-V1.
+ conic_fov: False
+ num_fov_actions: 12
+ kill_unit_step_mul: 2
+ fully_observable: False
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+
+
+ continuing_episode: False
+ difficulty: "7"
+ game_version: null
+ move_amount: 2
+ obs_all_health: True
+ obs_instead_of_state: False
+ obs_last_action: False
+ obs_own_health: True
+ obs_pathing_grid: False
+ obs_terrain_height: False
+ obs_timestep_number: False
+ reward_death_value: 10
+ reward_defeat: 0
+ reward_negative_scale: 0.5
+ reward_only_positive: True
+ reward_scale: True
+ reward_scale_rate: 20
+ reward_sparse: False
+ reward_win: 200
+ replay_dir: ""
+ replay_prefix: ""
+ state_last_action: True
+ state_timestep_number: False
+ step_mul: 8
+ seed: null
+ heuristic_ai: False
+ heuristic_rest: False
+ debug: False
+
+test_greedy: True
+test_nepisode: 32
+test_interval: 10000
+log_interval: 10000
+runner_log_interval: 10000
+learner_log_interval: 10000
+t_max: 2050000
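
In SMACv2 the `capability_config` block makes team composition stochastic: each episode, the `weighted_teams` generator draws the five units' types according to the listed weights (45% stalker, 45% zealot, 10% colossus), while the `exception_unit_types`/`observe` entries control how the rare type is handled (see the SMACv2 documentation for the exact semantics). A purely illustrative sketch of what weighted team sampling implies, not SMACv2's actual generator:

```python
import random

def sample_team(n_units=5,
                unit_types=("stalker", "zealot", "colossus"),
                weights=(0.45, 0.45, 0.1),
                seed=None):
    """Draw a unit type for each of the n_units slots according to `weights`."""
    rng = random.Random(seed)
    return rng.choices(unit_types, weights=weights, k=n_units)

print(sample_team(seed=0))   # a 5-element team drawn from the three unit types
```
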
diff --git a/src/config/envs/sc2_v2_terran.yaml b/src/config/envs/sc2_v2_terran.yaml
new file mode 100644
index 0000000..4374289
--- /dev/null
+++ b/src/config/envs/sc2_v2_terran.yaml
@@ -0,0 +1,76 @@
+env: sc2_v2
+
+env_args:
+ change_fov_with_move: False # if True, the full field-of-view is split into 4 90-degree sectors (instead of 12 30-degree sectors), each corresponding to one of the move actions (north, south, east, west).
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+ capability_config:
+ n_units: 5
+ team_gen:
+ dist_type: "weighted_teams"
+ unit_types:
+ - "marine"
+ - "marauder"
+ - "medivac"
+ weights:
+ - 0.45
+ - 0.45
+ - 0.1
+ exception_unit_types:
+ - "medivac"
+ observe: True
+
+ start_positions:
+ dist_type: "surrounded_and_reflect"
+ p: 0.5
+ n_enemies: 5
+ map_x: 32
+ map_y: 32
+
+ map_name: "10gen_terran"
+ obs_own_pos: True
+ obs_starcraft: True
+ # conic_fov: True
+ # Since our goal is not to design more efficient exploration algorithms, we keep the agents' field-of-view and attack range as a full circle, as in SMAC-V1.
+ conic_fov: False
+ num_fov_actions: 12
+ kill_unit_step_mul: 2
+ fully_observable: False
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+
+
+ continuing_episode: False
+ difficulty: "7"
+ game_version: null
+ move_amount: 2
+ obs_all_health: True
+ obs_instead_of_state: False
+ obs_last_action: False
+ obs_own_health: True
+ obs_pathing_grid: False
+ obs_terrain_height: False
+ obs_timestep_number: False
+ reward_death_value: 10
+ reward_defeat: 0
+ reward_negative_scale: 0.5
+ reward_only_positive: True
+ reward_scale: True
+ reward_scale_rate: 20
+ reward_sparse: False
+ reward_win: 200
+ replay_dir: ""
+ replay_prefix: ""
+ state_last_action: True
+ state_timestep_number: False
+ step_mul: 8
+ seed: null
+ heuristic_ai: False
+ heuristic_rest: False
+ debug: False
+
+test_greedy: True
+test_nepisode: 32
+test_interval: 10000
+log_interval: 10000
+runner_log_interval: 10000
+learner_log_interval: 10000
+t_max: 2050000
diff --git a/src/config/envs/sc2_v2_zerg.yaml b/src/config/envs/sc2_v2_zerg.yaml
new file mode 100644
index 0000000..1760958
--- /dev/null
+++ b/src/config/envs/sc2_v2_zerg.yaml
@@ -0,0 +1,76 @@
+env: sc2_v2
+
+env_args:
+ change_fov_with_move: False # if True, the full field-of-view is split into 4 90-degree sectors (instead of 12 30-degree sectors), each corresponding to one of the move actions (north, south, east, west).
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+ capability_config:
+ n_units: 5
+ team_gen:
+ dist_type: "weighted_teams"
+ unit_types:
+ - "zergling"
+ - "hydralisk"
+ - "baneling"
+ weights:
+ - 0.45
+ - 0.45
+ - 0.1
+ exception_unit_types:
+ - "baneling"
+ observe: True
+
+ start_positions:
+ dist_type: "surrounded_and_reflect"
+ p: 0.5
+ n_enemies: 5
+ map_x: 32
+ map_y: 32
+
+ map_name: "10gen_zerg"
+ obs_own_pos: True
+ obs_starcraft: True
+ # conic_fov: True
+ # Since our goal is not to design more efficient exploration algorithms, we keep the agents' field-of-view and attack range as a full circle, as in SMAC-V1.
+ conic_fov: False
+ num_fov_actions: 12
+ kill_unit_step_mul: 2
+ fully_observable: False
+ # %%%%%%%%%%%%%%%%%%%%%% new config compared to v1 %%%%%%%%%%%%%%%%%%%%%%
+
+
+ continuing_episode: False
+ difficulty: "7"
+ game_version: null
+ move_amount: 2
+ obs_all_health: True
+ obs_instead_of_state: False
+ obs_last_action: False
+ obs_own_health: True
+ obs_pathing_grid: False
+ obs_terrain_height: False
+ obs_timestep_number: False
+ reward_death_value: 10
+ reward_defeat: 0
+ reward_negative_scale: 0.5
+ reward_only_positive: True
+ reward_scale: True
+ reward_scale_rate: 20
+ reward_sparse: False
+ reward_win: 200
+ replay_dir: ""
+ replay_prefix: ""
+ state_last_action: True
+ state_timestep_number: False
+ step_mul: 8
+ seed: null
+ heuristic_ai: False
+ heuristic_rest: False
+ debug: False
+
+test_greedy: True
+test_nepisode: 32
+test_interval: 10000
+log_interval: 10000
+runner_log_interval: 10000
+learner_log_interval: 10000
+t_max: 2050000
diff --git a/src/controllers/FeUdal_controller.py b/src/controllers/FeUdal_controller.py
new file mode 100644
index 0000000..0b2f4e7
--- /dev/null
+++ b/src/controllers/FeUdal_controller.py
@@ -0,0 +1,164 @@
+#from modules.agents import REGISTRY as agent_REGISTRY
+from modules.agents.FeUdal_agent import Feudal_ManagerAgent, Feudal_WorkerAgent
+from components.action_selectors import REGISTRY as action_REGISTRY
+import torch
+
+
+# This multi-agent controller shares parameters between agents
+class FeUdalMAC:
+ def __init__(self, scheme, groups, args):
+ self.n_agents = args.n_agents
+ self.args = args
+ self.input_shape = self._get_input_shape(scheme)
+ self.manager_input_shape = self._get_manager_input_shape(scheme)
+ self._build_agents(self.manager_input_shape, self.input_shape)
+ self.agent_output_type = args.agent_output_type
+
+ self.action_selector = action_REGISTRY[args.action_selector](args)
+
+ self.manager_hidden_states = None
+ self.manager_cell_states = None
+ self.worker_hidden_states = None
+ self.worker_cell_states = None
+
+ self.single_past_goals = []
+ self.batch_past_goals = []
+
+ def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
+ # Only select actions for the selected batch elements in bs
+ avail_actions = ep_batch["avail_actions"][:, t_ep]
+ agent_outputs, goal_outs, value_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)
+ chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
+ return chosen_actions
+
+ def forward(self, ep_batch, t, test_mode=False):
+ manager_inputs = self._build_manager_inputs(ep_batch, t)
+ worker_inputs = self._build_inputs(ep_batch, t)
+
+ # Updated to handle the LSTM hidden states and cell states
+ manager_goal, manager_value, (new_manager_hidden, new_manager_cell) = self.manager_agent(
+ manager_inputs,
+ (self.manager_hidden_states, self.manager_cell_states)
+ )
+ self.manager_hidden_states = new_manager_hidden
+ self.manager_cell_states = new_manager_cell
+
+ worker_agent_outs, (new_worker_hidden, new_worker_cell), self.single_past_goals, self.batch_past_goals = self.worker_agent(
+ worker_inputs,
+ (self.worker_hidden_states, self.worker_cell_states),
+ self.single_past_goals,
+ self.batch_past_goals,
+ manager_goal
+ )
+ self.worker_hidden_states = new_worker_hidden
+ self.worker_cell_states = new_worker_cell
+
+ return worker_agent_outs.view(ep_batch.batch_size, self.n_agents, -1), manager_goal.view(ep_batch.batch_size, self.n_agents, -1), manager_value.view(ep_batch.batch_size, self.n_agents, -1)
+
+ def init_hidden(self, batch_size):
+ # Initialise the LSTM hidden states and cell states
+ manager_hidden, manager_cell = self.manager_agent.init_hidden()
+ worker_hidden, worker_cell = self.worker_agent.init_hidden()
+
+ self.manager_hidden_states = manager_hidden.unsqueeze(0).expand(batch_size, self.n_agents, -1)
+ self.manager_cell_states = manager_cell.unsqueeze(0).expand(batch_size, self.n_agents, -1)
+ self.worker_hidden_states = worker_hidden.unsqueeze(0).expand(batch_size, self.n_agents, -1)
+ self.worker_cell_states = worker_cell.unsqueeze(0).expand(batch_size, self.n_agents, -1)
+
+ def parameters(self):
+ """返回所有可訓練參數"""
+ # 將生成器轉換為列表後再合併
+ manager_params = list(self.manager_agent.parameters())
+ worker_params = list(self.worker_agent.parameters())
+ return manager_params + worker_params
+
+ def manager_parameters(self):
+ return self.manager_agent.parameters()
+
+ def worker_parameters(self):
+ return self.worker_agent.parameters()
+
+ def load_state(self, other_mac):
+ self.manager_agent.load_state_dict(other_mac.manager_agent.state_dict())
+ self.worker_agent.load_state_dict(other_mac.worker_agent.state_dict())
+
+ def cuda(self):
+ self.manager_agent.cuda()
+ self.worker_agent.cuda()
+
+ def cpu(self):
+ self.manager_agent.cpu()
+ self.worker_agent.cpu()
+
+ def get_device(self):
+ """返回模型所在的設備(CPU或GPU)"""
+ # 直接檢查第一個參數的設備
+ if hasattr(self, 'manager_agent'):
+ return next(self.manager_agent.parameters()).device
+ elif hasattr(self, 'worker_agent'):
+ return next(self.worker_agent.parameters()).device
+ return torch.device("cpu") # 默認返回 CPU
+
+ def save_models(self, path):
+ torch.save(self.manager_agent.state_dict(), "{}/manager_agent.torch".format(path))
+ torch.save(self.worker_agent.state_dict(), "{}/worker_agent.torch".format(path))
+
+
+ def load_models(self, path):
+ self.manager_agent.load_state_dict(torch.load("{}/manager_agent.torch".format(path), map_location=lambda storage, loc: storage))
+ self.worker_agent.load_state_dict(torch.load("{}/worker_agent.torch".format(path), map_location=lambda storage, loc: storage))
+
+ def _build_agents(self, manager_input_shape, worker_input_shape):
+ self.manager_agent = Feudal_ManagerAgent(manager_input_shape, self.args)
+ self.worker_agent = Feudal_WorkerAgent(worker_input_shape, self.args)
+
+ def _build_inputs(self, batch, t):
+ # Assumes homogeneous agents with flat observations.
+ # Other MACs might want to e.g. delegate building inputs to each agent
+ bs = batch.batch_size
+ inputs = []
+ inputs.append(batch["obs"][:, t]) # b1av
+ if self.args.obs_last_action:
+ if t == 0:
+ inputs.append(torch.zeros_like(batch["actions_onehot"][:, t]))
+ else:
+ inputs.append(batch["actions_onehot"][:, t-1])
+ if self.args.obs_agent_id:
+ inputs.append(torch.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
+
+ inputs = torch.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
+ return inputs
+
+ def _get_input_shape(self, scheme):
+ input_shape = scheme["obs"]["vshape"]
+ if self.args.obs_last_action:
+ input_shape += scheme["actions_onehot"]["vshape"][0]
+ if self.args.obs_agent_id:
+ input_shape += self.n_agents
+
+ return input_shape
+
+ def _build_manager_inputs(self, batch, t):
+ # Assume the manager needs a different input structure
+ bs = batch.batch_size
+ manager_inputs = []
+ manager_inputs.append(batch["obs"][:, t]) # 假設有一個 manager 特定的觀察
+ # if self.args.obs_last_action:
+ # if t == 0:
+ # manager_inputs.append(torch.zeros_like(batch["actions_onehot"][:, t]))
+ # else:
+ # manager_inputs.append(batch["actions_onehot"][:, t-1])
+ # # Agent IDs are not needed, so the code below is removed
+ # if self.args.obs_agent_id:
+ # manager_inputs.append(torch.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
+
+ manager_inputs = torch.cat([x.reshape(bs*self.n_agents, -1) for x in manager_inputs], dim=1)
+ return manager_inputs
+
+ def _get_manager_input_shape(self, scheme):
+ input_shape = scheme["obs"]["vshape"]
+ # if self.args.manager_obs_last_action:
+ # input_shape += scheme["manager_actions_onehot"]["vshape"][0]
+ # if self.args.manager_obs_agent_id:
+ # input_shape += self.n_agents
+ return input_shape
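
A self-contained illustration of the hidden-state bookkeeping in `init_hidden()` and `forward()`: each agent network returns a single `[1, hidden_dim]` state, which is expanded to one state per (batch, agent) pair, and `forward()` later reshapes the flat outputs back to `[batch_size, n_agents, -1]`. The numbers below are illustrative:

```python
import torch

batch_size, n_agents, hidden_dim = 8, 5, 64
h0 = torch.zeros(1, hidden_dim)          # what an agent's init_hidden() typically returns
hidden = h0.unsqueeze(0).expand(batch_size, n_agents, -1)
cell = torch.zeros_like(hidden)          # same shape is kept for the LSTM cell state
assert hidden.shape == (batch_size, n_agents, hidden_dim)
assert cell.shape == hidden.shape
```
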
diff --git a/src/controllers/__init__.py b/src/controllers/__init__.py
new file mode 100644
index 0000000..d4a323a
--- /dev/null
+++ b/src/controllers/__init__.py
@@ -0,0 +1,12 @@
+REGISTRY = {}
+
+from .hpn_controller import HPNMAC
+from .basic_controller import BasicMAC
+from .n_controller import NMAC
+from .updet_controller import UPDETController
+from .FeUdal_controller import FeUdalMAC
+REGISTRY["basic_mac"] = BasicMAC
+REGISTRY["n_mac"] = NMAC
+REGISTRY["hpn_mac"] = HPNMAC
+REGISTRY["updet_mac"] = UPDETController
+REGISTRY["feudal_mac"] = FeUdalMAC
diff --git a/src/controllers/basic_controller.py b/src/controllers/basic_controller.py
new file mode 100644
index 0000000..f042491
--- /dev/null
+++ b/src/controllers/basic_controller.py
@@ -0,0 +1,111 @@
+import os
+
+from modules.agents import REGISTRY as agent_REGISTRY
+from components.action_selectors import REGISTRY as action_REGISTRY
+import torch as th
+from utils.th_utils import get_parameters_num
+
+
+# This multi-agent controller shares parameters between agents
+class BasicMAC:
+ def __init__(self, scheme, groups, args):
+ self.n_agents = args.n_agents
+ self.args = args
+ self.input_shape = self._get_input_shape(scheme)
+ self._build_agents(self.input_shape)
+ self.agent_output_type = args.agent_output_type
+
+ self.action_selector = action_REGISTRY[args.action_selector](args)
+ self.save_probs = getattr(self.args, 'save_probs', False)
+
+ self.hidden_states = None
+
+ def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
+ if t_ep == 0:
+ self.set_evaluation_mode()
+ # Only select actions for the selected batch elements in bs
+ avail_actions = ep_batch["avail_actions"][:, t_ep]
+ agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
+ chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
+ return chosen_actions
+
+ def forward(self, ep_batch, t, test_mode=False):
+ agent_inputs = self._build_inputs(ep_batch, t)
+ avail_actions = ep_batch["avail_actions"][:, t]
+ agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
+
+ # Softmax the agent outputs if they're policy logits
+ if self.agent_output_type == "pi_logits":
+ if getattr(self.args, "mask_before_softmax", True):
+ # Make the logits for unavailable actions very negative to minimise their effect on the softmax
+ agent_outs = agent_outs.reshape(ep_batch.batch_size * self.n_agents, -1)
+ reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
+ agent_outs[reshaped_avail_actions == 0] = -1e10
+
+ agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
+
+ return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
+
+ def init_hidden(self, batch_size):
+ self.hidden_states = self.agent.init_hidden()
+ if self.hidden_states is not None:
+ self.hidden_states = self.hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
+
+ def set_train_mode(self):
+ self.agent.train()
+
+ def set_evaluation_mode(self):
+ self.agent.eval()
+
+ def parameters(self):
+ return self.agent.parameters()
+
+ def load_state(self, other_mac):
+ self.agent.load_state_dict(other_mac.agent.state_dict())
+
+ def cuda(self):
+ self.agent.cuda()
+
+ def cpu(self):
+ self.agent.cpu()
+
+ def get_device(self):
+ return next(self.parameters()).device
+
+ def save_models(self, path):
+ th.save(self.agent.state_dict(), "{}/agent.th".format(path))
+
+ def load_models(self, path):
+ self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
+
+ def _build_agents(self, input_shape):
+ self.agent = agent_REGISTRY[self.args.agent](input_shape, self.args)
+ #print("&&&&&&&&&&&&&&&&&&&&&&", self.args.agent, get_parameters_num(self.parameters()))
+ # for p in list(self.parameters()):
+ # print(p.shape)
+
+ def _build_inputs(self, batch, t):
+ # Assumes homogeneous agents with flat observations.
+ # Other MACs might want to e.g. delegate building inputs to each agent
+ bs = batch.batch_size
+ inputs = []
+ inputs.append(batch["obs"][:, t]) # b1av
+ if self.args.obs_last_action:
+ if t == 0:
+ inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
+ else:
+ inputs.append(batch["actions_onehot"][:, t-1])
+ if self.args.obs_agent_id:
+ inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
+
+ inputs = th.cat([x.reshape(bs, self.n_agents, -1) for x in inputs], dim=-1)
+ return inputs
+
+ def _get_input_shape(self, scheme):
+ input_shape = scheme["obs"]["vshape"]
+ if self.args.obs_last_action:
+ input_shape += scheme["actions_onehot"]["vshape"][0]
+ if self.args.obs_agent_id:
+ input_shape += self.n_agents
+
+ return input_shape
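
A quick worked example of `_get_input_shape()` with illustrative numbers (the observation and action dimensions below are hypothetical, not taken from a specific map): the per-agent input is the flat observation, optionally concatenated with the previous action one-hot and the agent-id one-hot.

```python
obs_dim, n_actions, n_agents = 80, 11, 5
obs_last_action, obs_agent_id = True, True

input_shape = obs_dim
if obs_last_action:
    input_shape += n_actions      # one-hot of the previous action
if obs_agent_id:
    input_shape += n_agents       # one-hot agent id
print(input_shape)                # 96 = 80 + 11 + 5
```
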
diff --git a/src/controllers/hpn_controller.py b/src/controllers/hpn_controller.py
new file mode 100644
index 0000000..8824f6b
--- /dev/null
+++ b/src/controllers/hpn_controller.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+import numpy as np
+import torch as th
+
+from .basic_controller import BasicMAC
+
+
+class DataParallelAgent(th.nn.DataParallel):
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.module.init_hidden()
+
+
+# This multi-agent controller shares parameters between agents
+class HPNMAC(BasicMAC):
+ def __init__(self, scheme, groups, args):
+ super(HPNMAC, self).__init__(scheme, groups, args)
+ self.n_enemies = args.n_enemies
+ self.n_allies = self.n_agents - 1
+
+ # Add new func
+ def _get_obs_component_dim(self):
+ move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim = self.args.obs_component # [4, (6, 5), (4, 5), 1]
+ enemy_feats_dim_flatten = np.prod(enemy_feats_dim)
+ ally_feats_dim_flatten = np.prod(ally_feats_dim)
+ return (move_feats_dim, enemy_feats_dim_flatten, ally_feats_dim_flatten, own_feats_dim), (
+ enemy_feats_dim, ally_feats_dim)
+
+ def _build_inputs(self, batch, t):
+ bs = batch.batch_size
+ obs_component_dim, _ = self._get_obs_component_dim()
+ raw_obs_t = batch["obs"][:, t] # [batch, agent_num, obs_dim]
+ move_feats_t, enemy_feats_t, ally_feats_t, own_feats_t = th.split(raw_obs_t, obs_component_dim, dim=-1)
+ enemy_feats_t = enemy_feats_t.reshape(bs * self.n_agents * self.n_enemies,
+ -1) # [bs * n_agents * n_enemies, fea_dim]
+ ally_feats_t = ally_feats_t.reshape(bs * self.n_agents * self.n_allies,
+ -1) # [bs * n_agents * n_allies, a_fea_dim]
+ # merge move features and own features to simplify computation.
+ context_feats = [move_feats_t, own_feats_t] # [batch, agent_num, own_dim]
+ own_context = th.cat(context_feats, dim=2).reshape(bs * self.n_agents, -1) # [bs * n_agents, own_dim]
+
+ embedding_indices = []
+ if self.args.obs_agent_id:
+ # agent-id indices, [bs, n_agents]
+ embedding_indices.append(th.arange(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1))
+ if self.args.obs_last_action:
+ # action-id indices, [bs, n_agents]
+ if t == 0:
+ embedding_indices.append(None)
+ else:
+ embedding_indices.append(batch["actions"][:, t - 1].squeeze(-1))
+
+ return bs, own_context, enemy_feats_t, ally_feats_t, embedding_indices
+
+ def _get_input_shape(self, scheme):
+ move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim = self.args.obs_component
+ own_context_dim = move_feats_dim + own_feats_dim
+ return own_context_dim, enemy_feats_dim, ally_feats_dim
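
The split in `_build_inputs()` relies on `args.obs_component`, e.g. `[4, (6, 5), (4, 5), 1]` as in the inline comment: 4 move features, 6 enemies with 5 features each, 4 allies with 5 features each, and 1 own feature. A self-contained illustration of that split with dummy tensors:

```python
import torch as th

bs, n_agents = 2, 5
split_sizes = [4, 6 * 5, 4 * 5, 1]                    # flattened size of each obs component
obs_dim = sum(split_sizes)                            # 55
raw_obs_t = th.zeros(bs, n_agents, obs_dim)

move, enemy, ally, own = th.split(raw_obs_t, split_sizes, dim=-1)
enemy = enemy.reshape(bs * n_agents * 6, -1)          # [bs * n_agents * n_enemies, 5]
ally = ally.reshape(bs * n_agents * 4, -1)            # [bs * n_agents * n_allies, 5]
own_context = th.cat([move, own], dim=2).reshape(bs * n_agents, -1)  # [bs * n_agents, 5]
assert own_context.shape == (bs * n_agents, 4 + 1)
```
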
diff --git a/src/controllers/n_controller.py b/src/controllers/n_controller.py
new file mode 100644
index 0000000..9a6a6eb
--- /dev/null
+++ b/src/controllers/n_controller.py
@@ -0,0 +1,28 @@
+import os
+
+from modules.agents import REGISTRY as agent_REGISTRY
+from components.action_selectors import REGISTRY as action_REGISTRY
+from .basic_controller import BasicMAC
+import torch as th
+from utils.rl_utils import RunningMeanStd
+import numpy as np
+
+# This multi-agent controller shares parameters between agents
+class NMAC(BasicMAC):
+ def __init__(self, scheme, groups, args):
+ super(NMAC, self).__init__(scheme, groups, args)
+
+ def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
+ if t_ep == 0:
+ self.set_evaluation_mode()
+ # Only select actions for the selected batch elements in bs
+ avail_actions = ep_batch["avail_actions"][:, t_ep]
+ qvals = self.forward(ep_batch, t_ep, test_mode=test_mode)
+ chosen_actions = self.action_selector.select_action(qvals[bs], avail_actions[bs], t_env, test_mode=test_mode)
+ return chosen_actions
+
+ def forward(self, ep_batch, t, test_mode=False):
+ agent_inputs = self._build_inputs(ep_batch, t)
+ # avail_actions = ep_batch["avail_actions"][:, t]
+ agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
+ return agent_outs
\ No newline at end of file
diff --git a/src/controllers/updet_controller.py b/src/controllers/updet_controller.py
new file mode 100644
index 0000000..50ab6ee
--- /dev/null
+++ b/src/controllers/updet_controller.py
@@ -0,0 +1,73 @@
+from .basic_controller import BasicMAC
+import torch as th
+import numpy as np
+import torch.nn.functional as F
+
+
+class UPDETController(BasicMAC):
+ def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
+ if t_ep == 0:
+ self.set_evaluation_mode()
+ # Only select actions for the selected batch elements in bs
+ avail_actions = ep_batch["avail_actions"][:, t_ep]
+ agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
+ chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env,
+ test_mode=test_mode)
+ return chosen_actions
+
+ def _get_obs_shape(self):
+ size = 0
+ for comp in self.args.obs_component:
+ if isinstance(comp, int):
+ size += comp
+ else:
+ size += np.prod(comp)
+ return size
+
+ def _get_obs_component_dim(self):
+ move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim = self.args.obs_component # [4, (6, 5), (4, 5), 1]
+ enemy_feats_dim = np.prod(enemy_feats_dim)
+ ally_feats_dim = np.prod(ally_feats_dim)
+ return move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim
+
+ def _build_inputs(self, batch, t):
+ bs = batch.batch_size
+ raw_obs = batch["obs"][:, t] # [batch, agent_num, obs_dim]
+ # assert raw_obs.shape[-1] == self._get_obs_shape()
+ obs_component_dim = self._get_obs_component_dim()
+ move_feats, enemy_feats, ally_feats, own_feats = th.split(raw_obs, obs_component_dim, dim=-1)
+ own_context = th.cat((own_feats, move_feats), dim=2)
+ # use the max_dim (over self, enemy and ally) to init the token layer (to support all maps)
+ token_dim = max([self.input_shape[0], self.input_shape[1][-1], self.input_shape[2][-1]])
+
+ own_context = own_context.contiguous().view(bs * self.n_agents, 1, -1)
+ enemy_feats = enemy_feats.contiguous().view(bs * self.n_agents, self.args.n_enemies, -1)
+ ally_feats = ally_feats.contiguous().view(bs * self.n_agents, (self.args.n_agents - 1), -1)
+
+ # In the original repository, UPDeT only supports marine-based battle scenarios (e.g. 3m, 8m, 5m_vs_6m), whose feature dimensions are all the same.
+ # We apply zero padding here to support all maps.
+ inputs = th.cat([
+ self.zero_padding(own_context, token_dim),
+ self.zero_padding(enemy_feats, token_dim),
+ self.zero_padding(ally_feats, token_dim)
+ ], dim=1)
+
+ return inputs
+
+ def _get_input_shape(self, scheme):
+ move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim = self.args.obs_component
+ own_context_dim = move_feats_dim + own_feats_dim
+ return own_context_dim, enemy_feats_dim, ally_feats_dim
+
+ def zero_padding(self, features, token_dim):
+ """
+ :param features: [bs * n_agents, k, fea_dim]
+ :param token_dim: maximum of fea_dim
+ :return:
+ """
+ existing_dim = features.shape[-1]
+ if existing_dim < token_dim:
+ # padding to the right side of the last dimension of the feature.
+ return F.pad(features, pad=[0, token_dim - existing_dim], mode='constant', value=0)
+ else:
+ return features
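
A quick check of `zero_padding()`: entity features of different widths are right-padded to a common `token_dim` so own, enemy, and ally features can be concatenated as one token sequence (the dimensions below are illustrative).

```python
import torch as th
import torch.nn.functional as F

own = th.ones(10, 1, 5)      # [bs * n_agents, 1, own_dim]
enemy = th.ones(10, 6, 8)    # [bs * n_agents, n_enemies, enemy_dim]
token_dim = max(5, 8)

own_padded = F.pad(own, pad=[0, token_dim - own.shape[-1]], mode='constant', value=0)
tokens = th.cat([own_padded, enemy], dim=1)   # stack entities as tokens
assert tokens.shape == (10, 7, 8)
```
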
diff --git a/src/envs/__init__.py b/src/envs/__init__.py
new file mode 100644
index 0000000..a92cb2d
--- /dev/null
+++ b/src/envs/__init__.py
@@ -0,0 +1,44 @@
+from functools import partial
+import sys
+import os
+
+from .multiagentenv import MultiAgentEnv
+
+try:
+ smac = True
+ from .smac_v1 import StarCraft2EnvWrapper
+except Exception as e:
+ print(e)
+ smac = False
+
+try:
+ smacv2 = True
+ from .smac_v2 import StarCraft2Env2Wrapper
+except Exception as e:
+ print(e)
+ smacv2 = False
+
+
+def env_fn(env, **kwargs) -> MultiAgentEnv:
+ return env(**kwargs)
+
+
+REGISTRY = {}
+
+if smac:
+ REGISTRY["sc2"] = partial(env_fn, env=StarCraft2EnvWrapper)
+ if sys.platform == "linux":
+ os.environ.setdefault("SC2PATH",
+ os.path.join(os.getcwd(), "3rdparty", "StarCraftII"))
+else:
+ print("SMAC V1 is not supported...")
+
+if smacv2:
+ REGISTRY["sc2_v2"] = partial(env_fn, env=StarCraft2Env2Wrapper)
+ if sys.platform == "linux":
+ os.environ.setdefault("SC2PATH",
+ os.path.join(os.getcwd(), "3rdparty", "StarCraftII"))
+else:
+ print("SMAC V2 is not supported...")
+
+print("Supported environments:", REGISTRY)
diff --git a/src/envs/multiagentenv.py b/src/envs/multiagentenv.py
new file mode 100644
index 0000000..9c311f3
--- /dev/null
+++ b/src/envs/multiagentenv.py
@@ -0,0 +1,60 @@
+class MultiAgentEnv(object):
+
+ def step(self, actions):
+ """ Returns reward, terminated, info """
+ raise NotImplementedError
+
+ def get_obs(self):
+ """ Returns all agent observations in a list """
+ raise NotImplementedError
+
+ def get_obs_agent(self, agent_id):
+ """ Returns observation for agent_id """
+ raise NotImplementedError
+
+ def get_obs_size(self):
+ """ Returns the shape of the observation """
+ raise NotImplementedError
+
+ def get_state(self):
+ raise NotImplementedError
+
+ def get_state_size(self):
+ """ Returns the shape of the state"""
+ raise NotImplementedError
+
+ def get_avail_actions(self):
+ raise NotImplementedError
+
+ def get_avail_agent_actions(self, agent_id):
+ """ Returns the available actions for agent_id """
+ raise NotImplementedError
+
+ def get_total_actions(self):
+ """ Returns the total number of actions an agent could ever take """
+ # TODO: This is only suitable for a discrete 1 dimensional action space for each agent
+ raise NotImplementedError
+
+ def reset(self):
+ """ Returns initial observations and states"""
+ raise NotImplementedError
+
+ def render(self):
+ raise NotImplementedError
+
+ def close(self):
+ raise NotImplementedError
+
+ def seed(self):
+ raise NotImplementedError
+
+ def save_replay(self):
+ raise NotImplementedError
+
+ def get_env_info(self):
+ env_info = {"state_shape": self.get_state_size(),
+ "obs_shape": self.get_obs_size(),
+ "n_actions": self.get_total_actions(),
+ "n_agents": self.n_agents,
+ "episode_limit": self.episode_limit}
+ return env_info
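
A minimal sketch of a toy environment built on this interface; note that `get_env_info()` relies on the subclass setting `self.n_agents` and `self.episode_limit`, and any method the runners call that is not overridden here (e.g. `close()`) would still raise NotImplementedError.

```python
class TwoAgentDummyEnv(MultiAgentEnv):
    def __init__(self):
        self.n_agents = 2
        self.episode_limit = 10
        self.t = 0

    def reset(self):
        self.t = 0
        return self.get_obs(), self.get_state()

    def step(self, actions):
        self.t += 1
        return 0.0, self.t >= self.episode_limit, {}   # reward, terminated, info

    def get_obs(self):
        return [[0.0] * 4 for _ in range(self.n_agents)]

    def get_obs_size(self):
        return 4

    def get_state(self):
        return [0.0] * 8

    def get_state_size(self):
        return 8

    def get_avail_actions(self):
        return [[1, 1] for _ in range(self.n_agents)]

    def get_total_actions(self):
        return 2
```
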
diff --git a/src/envs/smac_v1/StarCraft2EnvWrapper.py b/src/envs/smac_v1/StarCraft2EnvWrapper.py
new file mode 100644
index 0000000..8ebc8bc
--- /dev/null
+++ b/src/envs/smac_v1/StarCraft2EnvWrapper.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Project: API-Network
+File: StarCraft2EnvWrapper.py
+Author: Hao Xiaotian
+Date: 2022/6/13 16:26
+"""
+
+from .official.starcraft2 import StarCraft2Env
+
+class StarCraft2EnvWrapper(StarCraft2Env):
+
+ # Add new functions to support permutation operation
+ def get_obs_component(self):
+ move_feats_dim = self.get_obs_move_feats_size()
+ enemy_feats_dim = self.get_obs_enemy_feats_size()
+ ally_feats_dim = self.get_obs_ally_feats_size()
+ own_feats_dim = self.get_obs_own_feats_size()
+ obs_component = [move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim]
+ return obs_component
+
+ def get_state_component(self):
+ if self.obs_instead_of_state:
+ return [self.get_obs_size()] * self.n_agents
+
+ nf_al = 4 + self.shield_bits_ally + self.unit_type_bits
+ nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits
+
+ enemy_state = self.n_enemies * nf_en
+ ally_state = self.n_agents * nf_al
+
+ size = [ally_state, enemy_state]
+
+ if self.state_last_action:
+ size.append(self.n_agents * self.n_actions)
+ if self.state_timestep_number:
+ size.append(1)
+ return size
+
+ def get_env_info(self):
+ print("Starting to get environment info...") # 改用英文
+ env_info = {
+ "state_shape": self.get_state_size(),
+ "obs_shape": self.get_obs_size(),
+ "n_actions": self.get_total_actions(),
+ "n_agents": self.n_agents,
+ "n_enemies": self.n_enemies,
+ "episode_limit": self.episode_limit,
+
+ "n_normal_actions": self.n_actions_no_attack,
+ "n_allies": self.n_agents - 1,
+ # "obs_ally_feats_size": self.get_obs_ally_feats_size(),
+ # "obs_enemy_feats_size": self.get_obs_enemy_feats_size(),
+ "state_ally_feats_size": self.get_ally_num_attributes(), # 4 + self.shield_bits_ally + self.unit_type_bits,
+ "state_enemy_feats_size": self.get_enemy_num_attributes(),
+ # 3 + self.shield_bits_enemy + self.unit_type_bits,
+ "obs_component": self.get_obs_component(),
+ "state_component": self.get_state_component(),
+ "map_type": self.map_type,
+ }
+ print("Environment info:", env_info) # 改用英文
+ return env_info
+
+ def _get_medivac_ids(self):
+ print("Starting to get medivac IDs...") # 改用英文
+ medivac_ids = []
+ for al_id, al_unit in self.agents.items():
+ if self.map_type == "MMM" and al_unit.unit_type == self.medivac_id:
+ medivac_ids.append(al_id)
+ print("Medivac IDs:", medivac_ids) # 改用英文
+ return medivac_ids
+
+ # def reward_battle(self):
+ # """Reward function when self.reward_spare==False.
+ #
+ # Fix the **REWARD FUNCTION BUG** of the original starcraft2.py.
+ #
+ # We carefully check the code and indeed find some code error in starcraft2.py.
+ # The error is caused by the incorrect reward calculation for the shield regeneration process and this error will
+ # only occur for scenarios where the enemies are Protoss units.
+ #
+ # (1) At line 717 of reward_battle() of starcraft2.py, the reward is computed as: reward = abs(delta_enemy).
+ # Normally, when the agents attack the enemies, delta_enemy will > 0 and thus the agents will be rewarded for attacking enemies.
+ #
+ # (2) For Protoss enemies, delta_enemy can < 0 due to the shield regeneration. However, due to the abs() taken over delta_enemy,
+ # the agents will still be rewarded when the enemies' shields regenerate. This incorrect reward will lead to undesired behaviors,
+ # e.g., attacking the enemies but not killing them, and then waiting for their shields to regenerate.
+ #
+ # (3) Due to the PI/PE design and the improved representational capacity, HPN-QMIX is more sensitive to such
+ # incorrect rewards and sometimes learns strange behaviors.
+ #
+ # Returns accumulative hit/shield point damage dealt to the enemy
+ # + reward_death_value per enemy unit killed, and, in case
+ # self.reward_only_positive == False, - (damage dealt to ally units
+ # + reward_death_value per ally unit killed) * self.reward_negative_scale
+ # """
+ # if self.reward_sparse:
+ # return 0
+ #
+ # reward = 0
+ # delta_deaths = 0
+ # delta_ally = 0
+ # delta_enemy = 0
+ #
+ # neg_scale = self.reward_negative_scale
+ #
+ # # update deaths
+ # for al_id, al_unit in self.agents.items():
+ # if not self.death_tracker_ally[al_id]:
+ # # did not die so far
+ # prev_health = (
+ # self.previous_ally_units[al_id].health
+ # + self.previous_ally_units[al_id].shield
+ # )
+ # if al_unit.health == 0:
+ # # just died
+ # self.death_tracker_ally[al_id] = 1
+ # if not self.reward_only_positive:
+ # delta_deaths -= self.reward_death_value * neg_scale
+ # delta_ally += prev_health * neg_scale
+ # else:
+ # # still alive
+ # delta_ally += neg_scale * (
+ # prev_health - al_unit.health - al_unit.shield
+ # )
+ #
+ # for e_id, e_unit in self.enemies.items():
+ # if not self.death_tracker_enemy[e_id]:
+ # prev_health = (
+ # self.previous_enemy_units[e_id].health
+ # + self.previous_enemy_units[e_id].shield
+ # )
+ # if e_unit.health == 0:
+ # self.death_tracker_enemy[e_id] = 1
+ # delta_deaths += self.reward_death_value
+ # delta_enemy += prev_health
+ # else:
+ # delta_enemy += prev_health - e_unit.health - e_unit.shield
+ #
+ # if self.reward_only_positive:
+ # ###### reward = abs(delta_enemy + delta_deaths) # shield regeneration (the original wrong implementation)
+ # # reward = max(delta_enemy, 0) + delta_deaths # only consider the shield damage
+ # reward = delta_enemy + delta_deaths # consider the `+shield-damage` and the `-shield-regeneration`
+ # else:
+ # reward = delta_enemy + delta_deaths - delta_ally
+ #
+ # return reward
\ No newline at end of file
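
The commented-out `reward_battle()` above documents the shield-regeneration issue this wrapper is concerned with; a tiny numeric check of the difference between the original `abs(...)` formulation and the sign-preserving one:

```python
# If an enemy's shield regenerates by 2 points between steps, delta_enemy = -2.
delta_enemy, delta_deaths = -2.0, 0.0

buggy_reward = abs(delta_enemy + delta_deaths)      # 2.0  -> regeneration is rewarded
fixed_reward = delta_enemy + delta_deaths           # -2.0 -> regeneration is penalised
print(buggy_reward, fixed_reward)
```
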
diff --git a/src/envs/smac_v1/__init__.py b/src/envs/smac_v1/__init__.py
new file mode 100644
index 0000000..d35a953
--- /dev/null
+++ b/src/envs/smac_v1/__init__.py
@@ -0,0 +1 @@
+from .StarCraft2EnvWrapper import StarCraft2EnvWrapper
\ No newline at end of file
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/10m_vs_11m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/10m_vs_11m.SC2Map
new file mode 100644
index 0000000..1dc2286
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/10m_vs_11m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/1c3s5z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/1c3s5z.SC2Map
new file mode 100644
index 0000000..07dfe38
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/1c3s5z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/25m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/25m.SC2Map
new file mode 100644
index 0000000..fcfdeb0
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/25m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/27m_vs_30m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/27m_vs_30m.SC2Map
new file mode 100644
index 0000000..861c7f7
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/27m_vs_30m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map
new file mode 100644
index 0000000..b740b6c
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/2m_vs_1z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/2m_vs_1z.SC2Map
new file mode 100644
index 0000000..f4c05c4
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/2m_vs_1z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/2s3z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/2s3z.SC2Map
new file mode 100644
index 0000000..59846cc
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/2s3z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map
new file mode 100644
index 0000000..c03328d
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3m.SC2Map
new file mode 100644
index 0000000..b35ec10
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z.SC2Map
new file mode 100644
index 0000000..e5a4313
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map
new file mode 100644
index 0000000..3927ca4
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_3z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_3z.SC2Map
new file mode 100644
index 0000000..4de7cf8
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_3z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_4z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_4z.SC2Map
new file mode 100644
index 0000000..8db2dfc
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_4z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_5z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_5z.SC2Map
new file mode 100644
index 0000000..70c99d2
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/3s_vs_5z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/5m_vs_6m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/5m_vs_6m.SC2Map
new file mode 100644
index 0000000..f2ae42c
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/5m_vs_6m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/6h_vs_8z.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/6h_vs_8z.SC2Map
new file mode 100644
index 0000000..df01eb6
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/6h_vs_8z.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/8m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/8m.SC2Map
new file mode 100644
index 0000000..6593c72
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/8m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/8m_vs_9m.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/8m_vs_9m.SC2Map
new file mode 100644
index 0000000..5b8815f
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/8m_vs_9m.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/MMM.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/MMM.SC2Map
new file mode 100644
index 0000000..ed26fe4
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/MMM.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/MMM2.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/MMM2.SC2Map
new file mode 100644
index 0000000..ab25a02
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/MMM2.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/bane_vs_bane.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/bane_vs_bane.SC2Map
new file mode 100644
index 0000000..bb81284
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/bane_vs_bane.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/corridor.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/corridor.SC2Map
new file mode 100644
index 0000000..90daed6
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/corridor.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/SMAC_Maps/so_many_baneling.SC2Map b/src/envs/smac_v1/official/maps/SMAC_Maps/so_many_baneling.SC2Map
new file mode 100644
index 0000000..6a184e3
Binary files /dev/null and b/src/envs/smac_v1/official/maps/SMAC_Maps/so_many_baneling.SC2Map differ
diff --git a/src/envs/smac_v1/official/maps/__init__.py b/src/envs/smac_v1/official/maps/__init__.py
new file mode 100644
index 0000000..4017bb3
--- /dev/null
+++ b/src/envs/smac_v1/official/maps/__init__.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from . import smac_maps
+
+
+def get_map_params(map_name):
+ map_param_registry = smac_maps.get_smac_map_registry()
+ return map_param_registry[map_name]
diff --git a/src/envs/smac_v1/official/maps/smac_maps.py b/src/envs/smac_v1/official/maps/smac_maps.py
new file mode 100644
index 0000000..6c53bc9
--- /dev/null
+++ b/src/envs/smac_v1/official/maps/smac_maps.py
@@ -0,0 +1,268 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from pysc2.maps import lib
+
+
+class SMACMap(lib.Map):
+ directory = "SMAC_Maps"
+ download = "https://github.com/oxwhirl/smac#smac-maps"
+ players = 2
+ step_mul = 8
+ game_steps_per_episode = 0
+
+
+map_param_registry = {
+"1m": {
+ "n_agents": 1,
+ "n_enemies": 1,
+ "limit": 30,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "2m": {
+ "n_agents": 2,
+ "n_enemies": 2,
+ "limit": 60,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "4m": {
+ "n_agents": 4,
+ "n_enemies": 4,
+ "limit": 60,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "5m": {
+ "n_agents": 5,
+ "n_enemies": 5,
+ "limit": 60,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "3m": {
+ "n_agents": 3,
+ "n_enemies": 3,
+ "limit": 60,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "8m": {
+ "n_agents": 8,
+ "n_enemies": 8,
+ "limit": 120,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "25m": {
+ "n_agents": 25,
+ "n_enemies": 25,
+ "limit": 150,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "5m_vs_6m": {
+ "n_agents": 5,
+ "n_enemies": 6,
+ "limit": 70,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "8m_vs_9m": {
+ "n_agents": 8,
+ "n_enemies": 9,
+ "limit": 120,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "10m_vs_11m": {
+ "n_agents": 10,
+ "n_enemies": 11,
+ "limit": 150,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "27m_vs_30m": {
+ "n_agents": 27,
+ "n_enemies": 30,
+ "limit": 180,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "MMM": {
+ "n_agents": 10,
+ "n_enemies": 10,
+ "limit": 150,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 3,
+ "map_type": "MMM",
+ },
+ "MMM2": {
+ "n_agents": 10,
+ "n_enemies": 12,
+ "limit": 180,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 3,
+ "map_type": "MMM",
+ },
+ "2s3z": {
+ "n_agents": 5,
+ "n_enemies": 5,
+ "limit": 120,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 2,
+ "map_type": "stalkers_and_zealots",
+ },
+ "3s5z": {
+ "n_agents": 8,
+ "n_enemies": 8,
+ "limit": 150,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 2,
+ "map_type": "stalkers_and_zealots",
+ },
+ "3s5z_vs_3s6z": {
+ "n_agents": 8,
+ "n_enemies": 9,
+ "limit": 170,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 2,
+ "map_type": "stalkers_and_zealots",
+ },
+ "3s_vs_3z": {
+ "n_agents": 3,
+ "n_enemies": 3,
+ "limit": 150,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 0,
+ "map_type": "stalkers",
+ },
+ "3s_vs_4z": {
+ "n_agents": 3,
+ "n_enemies": 4,
+ "limit": 200,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 0,
+ "map_type": "stalkers",
+ },
+ "3s_vs_5z": {
+ "n_agents": 3,
+ "n_enemies": 5,
+ "limit": 250,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 0,
+ "map_type": "stalkers",
+ },
+ "1c3s5z": {
+ "n_agents": 9,
+ "n_enemies": 9,
+ "limit": 180,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 3,
+ "map_type": "colossi_stalkers_zealots",
+ },
+ "2m_vs_1z": {
+ "n_agents": 2,
+ "n_enemies": 1,
+ "limit": 150,
+ "a_race": "T",
+ "b_race": "P",
+ "unit_type_bits": 0,
+ "map_type": "marines",
+ },
+ "corridor": {
+ "n_agents": 6,
+ "n_enemies": 24,
+ "limit": 400,
+ "a_race": "P",
+ "b_race": "Z",
+ "unit_type_bits": 0,
+ "map_type": "zealots",
+ },
+ "6h_vs_8z": {
+ "n_agents": 6,
+ "n_enemies": 8,
+ "limit": 150,
+ "a_race": "Z",
+ "b_race": "P",
+ "unit_type_bits": 0,
+ "map_type": "hydralisks",
+ },
+ "2s_vs_1sc": {
+ "n_agents": 2,
+ "n_enemies": 1,
+ "limit": 300,
+ "a_race": "P",
+ "b_race": "Z",
+ "unit_type_bits": 0,
+ "map_type": "stalkers",
+ },
+ "so_many_baneling": {
+ "n_agents": 7,
+ "n_enemies": 32,
+ "limit": 100,
+ "a_race": "P",
+ "b_race": "Z",
+ "unit_type_bits": 0,
+ "map_type": "zealots",
+ },
+ "bane_vs_bane": {
+ "n_agents": 24,
+ "n_enemies": 24,
+ "limit": 200,
+ "a_race": "Z",
+ "b_race": "Z",
+ "unit_type_bits": 2,
+ "map_type": "bane",
+ },
+ "2c_vs_64zg": {
+ "n_agents": 2,
+ "n_enemies": 64,
+ "limit": 400,
+ "a_race": "P",
+ "b_race": "Z",
+ "unit_type_bits": 0,
+ "map_type": "colossus",
+ },
+}
+
+
+def get_smac_map_registry():
+ return map_param_registry
+
+
+for name in map_param_registry.keys():
+ globals()[name] = type(name, (SMACMap,), dict(filename=name))
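
The final loop registers one `SMACMap` subclass per entry in `map_param_registry`, so that `pysc2.maps.get()` can later resolve each SMAC map by name. The same mechanism, illustrated without pysc2 (`FakeMapBase` below is only for demonstration):

```python
class FakeMapBase:
    directory = "SMAC_Maps"

name = "3m"
generated = type(name, (FakeMapBase,), dict(filename=name))  # dynamic subclass per map
assert generated.__name__ == "3m" and generated.filename == "3m"
assert issubclass(generated, FakeMapBase)
```
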
diff --git a/src/envs/smac_v1/official/starcraft2.py b/src/envs/smac_v1/official/starcraft2.py
new file mode 100644
index 0000000..d9ff301
--- /dev/null
+++ b/src/envs/smac_v1/official/starcraft2.py
@@ -0,0 +1,1699 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from envs.multiagentenv import MultiAgentEnv
+from .maps import get_map_params
+
+import atexit
+from warnings import warn
+from operator import attrgetter
+from copy import deepcopy
+import numpy as np
+import enum
+import math
+from absl import logging
+
+from pysc2 import maps
+from pysc2 import run_configs
+from pysc2.lib import protocol
+
+from s2clientprotocol import common_pb2 as sc_common
+from s2clientprotocol import sc2api_pb2 as sc_pb
+from s2clientprotocol import raw_pb2 as r_pb
+from s2clientprotocol import debug_pb2 as d_pb
+
+races = {
+ "R": sc_common.Random,
+ "P": sc_common.Protoss,
+ "T": sc_common.Terran,
+ "Z": sc_common.Zerg,
+}
+
+difficulties = {
+ "1": sc_pb.VeryEasy,
+ "2": sc_pb.Easy,
+ "3": sc_pb.Medium,
+ "4": sc_pb.MediumHard,
+ "5": sc_pb.Hard,
+ "6": sc_pb.Harder,
+ "7": sc_pb.VeryHard,
+ "8": sc_pb.CheatVision,
+ "9": sc_pb.CheatMoney,
+ "A": sc_pb.CheatInsane,
+}
+
+actions = {
+ "move": 16, # target: PointOrUnit
+ "attack": 23, # target: PointOrUnit
+ "stop": 4, # target: None
+ "heal": 386, # Unit
+}
+
+
+class Direction(enum.IntEnum):
+ NORTH = 0
+ SOUTH = 1
+ EAST = 2
+ WEST = 3
+
+
+class StarCraft2Env(MultiAgentEnv):
+ """The StarCraft II environment for decentralised multi-agent
+ micromanagement scenarios.
+ """
+
+ def __init__(
+ self,
+ map_name="8m",
+ step_mul=8,
+ move_amount=2,
+ difficulty="7",
+ game_version=None,
+ seed=None,
+ continuing_episode=False,
+ obs_all_health=True,
+ obs_own_health=True,
+ obs_last_action=False,
+ obs_pathing_grid=False,
+ obs_terrain_height=False,
+ obs_instead_of_state=False,
+ obs_timestep_number=False,
+ state_last_action=True,
+ state_timestep_number=False,
+ reward_sparse=False,
+ reward_only_positive=True,
+ reward_death_value=10,
+ reward_win=200,
+ reward_defeat=0,
+ reward_negative_scale=0.5,
+ reward_scale=True,
+ reward_scale_rate=20,
+ replay_dir="",
+ replay_prefix="",
+ window_size_x=1920,
+ window_size_y=1200,
+ heuristic_ai=False,
+ heuristic_rest=False,
+ debug=False,
+ ):
+ """
+ Create a StarCraft2Env environment.
+
+ Parameters
+ ----------
+ map_name : str, optional
+ The name of the SC2 map to play (default is "8m"). The full list
+ can be found by running bin/map_list.
+ step_mul : int, optional
+ How many game steps per agent step (default is 8). None
+ indicates to use the default map step_mul.
+ move_amount : float, optional
+ How far away units are ordered to move per step (default is 2).
+ difficulty : str, optional
+ The difficulty of built-in computer AI bot (default is "7").
+ game_version : str, optional
+ StarCraft II game version (default is None). None indicates the
+ latest version.
+ seed : int, optional
+ Random seed used during game initialisation. This allows runs to be reproduced.
+ continuing_episode : bool, optional
+ Whether to consider episodes continuing or finished after time
+ limit is reached (default is False).
+ obs_all_health : bool, optional
+ Agents receive the health of all units (in the sight range) as part
+ of observations (default is True).
+ obs_own_health : bool, optional
+ Agents receive their own health as a part of observations (default
+ is True). This flag is ignored when obs_all_health == True.
+ obs_last_action : bool, optional
+ Agents receive the last actions of all units (in the sight range)
+ as part of observations (default is False).
+ obs_pathing_grid : bool, optional
+ Whether observations include pathing values surrounding the agent
+ (default is False).
+ obs_terrain_height : bool, optional
+ Whether observations include terrain height values surrounding the
+ agent (default is False).
+ obs_instead_of_state : bool, optional
+ Use combination of all agents' observations as the global state
+ (default is False).
+ obs_timestep_number : bool, optional
+ Whether observations include the current timestep of the episode
+ (default is False).
+ state_last_action : bool, optional
+ Include the last actions of all agents as part of the global state
+ (default is True).
+ state_timestep_number : bool, optional
+ Whether the state includes the current timestep of the episode
+ (default is False).
+ reward_sparse : bool, optional
+ Receive 1/-1 reward for winning/losing an episode (default is
+ False). The rest of the reward parameters are ignored if True.
+ reward_only_positive : bool, optional
+ Reward is always positive (default is True).
+ reward_death_value : float, optional
+ The amount of reward received for killing an enemy unit (default
+ is 10). This is also the negative penalty for having an allied unit
+ killed if reward_only_positive == False.
+ reward_win : float, optional
+ The reward for winning in an episode (default is 200).
+ reward_defeat : float, optional
+ The reward for losing an episode (default is 0). This value
+ should be nonpositive.
+ reward_negative_scale : float, optional
+ Scaling factor for negative rewards (default is 0.5). This
+ parameter is ignored when reward_only_positive == True.
+ reward_scale : bool, optional
+ Whether or not to scale the reward (default is True).
+ reward_scale_rate : float, optional
+ Reward scale rate (default is 20). When reward_scale == True, the
+ reward received by the agents is divided by (max_reward /
+ reward_scale_rate), where max_reward is the maximum possible
+ reward per episode without considering the shield regeneration
+ of Protoss units.
+ replay_dir : str, optional
+ The directory to save replays (default is None). If None, the
+ replay will be saved in Replays directory where StarCraft II is
+ installed.
+ replay_prefix : str, optional
+ The prefix of the replay to be saved (default is None). If None,
+ the name of the map will be used.
+ window_size_x : int, optional
+ The length of StarCraft II window size (default is 1920).
+ window_size_y: int, optional
+ The height of StarCraft II window size (default is 1200).
+ heuristic_ai: bool, optional
+ Whether or not to use a non-learning heuristic AI (default False).
+ heuristic_rest: bool, optional
+ At any moment, restrict the actions of the heuristic AI to be
+ chosen from actions available to RL agents (default is False).
+ Ignored if heuristic_ai == False.
+ debug: bool, optional
+ Log messages about observations, state, actions and rewards for
+ debugging purposes (default is False).
+ """
+ # Map arguments
+ self.map_name = map_name
+ map_params = get_map_params(self.map_name)
+ self.n_agents = map_params["n_agents"]
+ self.n_enemies = map_params["n_enemies"]
+ self.episode_limit = map_params["limit"]
+ self._move_amount = move_amount
+ self._step_mul = step_mul
+ self.difficulty = difficulty
+
+ # Observations and state
+ self.obs_own_health = obs_own_health
+ self.obs_all_health = obs_all_health
+ self.obs_instead_of_state = obs_instead_of_state
+ self.obs_last_action = obs_last_action
+ self.obs_pathing_grid = obs_pathing_grid
+ self.obs_terrain_height = obs_terrain_height
+ self.obs_timestep_number = obs_timestep_number
+ self.state_last_action = state_last_action
+ self.state_timestep_number = state_timestep_number
+ if self.obs_all_health:
+ self.obs_own_health = True
+ self.n_obs_pathing = 8
+ self.n_obs_height = 9
+
+ # Rewards args
+ self.reward_sparse = reward_sparse
+ self.reward_only_positive = reward_only_positive
+ self.reward_negative_scale = reward_negative_scale
+ self.reward_death_value = reward_death_value
+ self.reward_win = reward_win
+ self.reward_defeat = reward_defeat
+ self.reward_scale = reward_scale
+ self.reward_scale_rate = reward_scale_rate
+
+ # Other
+ self.game_version = game_version
+ self.continuing_episode = continuing_episode
+ self._seed = seed
+ self.heuristic_ai = heuristic_ai
+ self.heuristic_rest = heuristic_rest
+ self.debug = debug
+ self.window_size = (window_size_x, window_size_y)
+ self.replay_dir = replay_dir
+ self.replay_prefix = replay_prefix
+
+ # Actions
+ self.n_actions_no_attack = 6
+ self.n_actions_move = 4
+ self.n_actions = self.n_actions_no_attack + self.n_enemies
+
+ # Map info
+ self._agent_race = map_params["a_race"]
+ self._bot_race = map_params["b_race"]
+ self.shield_bits_ally = 1 if self._agent_race == "P" else 0
+ self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
+ self.unit_type_bits = map_params["unit_type_bits"]
+ self.map_type = map_params["map_type"]
+ self._unit_types = None
+
+ self.max_reward = (
+ self.n_enemies * self.reward_death_value + self.reward_win
+ )
+
+ # create lists containing the names of attributes returned in states
+ self.ally_state_attr_names = [
+ "health",
+ "energy/cooldown",
+ "rel_x",
+ "rel_y",
+ ]
+ self.enemy_state_attr_names = ["health", "rel_x", "rel_y"]
+
+ if self.shield_bits_ally > 0:
+ self.ally_state_attr_names += ["shield"]
+ if self.shield_bits_enemy > 0:
+ self.enemy_state_attr_names += ["shield"]
+
+ if self.unit_type_bits > 0:
+ bit_attr_names = [
+ "type_{}".format(bit) for bit in range(self.unit_type_bits)
+ ]
+ self.ally_state_attr_names += bit_attr_names
+ self.enemy_state_attr_names += bit_attr_names
+
+ self.agents = {}
+ self.enemies = {}
+ self._episode_count = 0
+ self._episode_steps = 0
+ self._total_steps = 0
+ self._obs = None
+ self.battles_won = 0
+ self.battles_game = 0
+ self.timeouts = 0
+ self.force_restarts = 0
+ self.last_stats = None
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+ self._min_unit_type = 0
+ self.marine_id = self.marauder_id = self.medivac_id = 0
+ self.hydralisk_id = self.zergling_id = self.baneling_id = 0
+ self.stalker_id = self.colossus_id = self.zealot_id = 0
+ self.max_distance_x = 0
+ self.max_distance_y = 0
+ self.map_x = 0
+ self.map_y = 0
+ self.reward = 0
+ self.renderer = None
+ self.terrain_height = None
+ self.pathing_grid = None
+ self._run_config = None
+ self._sc2_proc = None
+ self._controller = None
+
+ # Try to avoid leaking SC2 processes on shutdown
+ atexit.register(lambda: self.close())
+
+ def _launch(self):
+ """Launch the StarCraft II game."""
+ self._run_config = run_configs.get(version=self.game_version)
+ _map = maps.get(self.map_name)
+
+ # Setting up the interface
+ interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
+ self._sc2_proc = self._run_config.start(
+ window_size=self.window_size, want_rgb=False
+ )
+ self._controller = self._sc2_proc.controller
+
+ # Request to create the game
+ create = sc_pb.RequestCreateGame(
+ local_map=sc_pb.LocalMap(
+ map_path=_map.path,
+ map_data=self._run_config.map_data(_map.path),
+ ),
+ realtime=False,
+ random_seed=self._seed,
+ )
+ create.player_setup.add(type=sc_pb.Participant)
+ create.player_setup.add(
+ type=sc_pb.Computer,
+ race=races[self._bot_race],
+ difficulty=difficulties[self.difficulty],
+ )
+ self._controller.create_game(create)
+
+ join = sc_pb.RequestJoinGame(
+ race=races[self._agent_race], options=interface_options
+ )
+ self._controller.join_game(join)
+
+ game_info = self._controller.game_info()
+ map_info = game_info.start_raw
+ map_play_area_min = map_info.playable_area.p0
+ map_play_area_max = map_info.playable_area.p1
+ self.max_distance_x = map_play_area_max.x - map_play_area_min.x
+ self.max_distance_y = map_play_area_max.y - map_play_area_min.y
+ self.map_x = map_info.map_size.x
+ self.map_y = map_info.map_size.y
+
+ if map_info.pathing_grid.bits_per_pixel == 1:
+ vals = np.array(list(map_info.pathing_grid.data)).reshape(
+ self.map_x, int(self.map_y / 8)
+ )
+ self.pathing_grid = np.transpose(
+ np.array(
+ [
+ [(b >> i) & 1 for b in row for i in range(7, -1, -1)]
+ for row in vals
+ ],
+ dtype=bool,
+ )
+ )
+ else:
+ self.pathing_grid = np.invert(
+ np.flip(
+ np.transpose(
+ np.array(
+                            list(map_info.pathing_grid.data), dtype=bool
+ ).reshape(self.map_x, self.map_y)
+ ),
+ axis=1,
+ )
+ )
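+        # Whichever branch ran, can_move() below treats truthy entries of
+        # pathing_grid[x, y] as walkable cells.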
+
+ self.terrain_height = (
+ np.flip(
+ np.transpose(
+ np.array(list(map_info.terrain_height.data)).reshape(
+ self.map_x, self.map_y
+ )
+ ),
+ 1,
+ )
+ / 255
+ )
+
+ def reset(self):
+ """Reset the environment. Required after each full episode.
+ Returns initial observations and states.
+ """
+ self._episode_steps = 0
+ if self._episode_count == 0:
+ # Launch StarCraft II
+ self._launch()
+ else:
+ self._restart()
+
+ # Information kept for counting the reward
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.win_counted = False
+ self.defeat_counted = False
+
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+
+ if self.heuristic_ai:
+ self.heuristic_targets = [None] * self.n_agents
+
+ try:
+ self._obs = self._controller.observe()
+ self.init_units()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ if self.debug:
+ logging.debug(
+ "Started Episode {}".format(self._episode_count).center(
+ 60, "*"
+ )
+ )
+
+ return self.get_obs(), self.get_state()
+
+ def _restart(self):
+ """Restart the environment by killing all units on the map.
+ There is a trigger in the SC2Map file, which restarts the
+ episode when there are no units left.
+ """
+ try:
+ self._kill_all_units()
+ self._controller.step(2)
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ def full_restart(self):
+ """Full restart. Closes the SC2 process and launches a new one."""
+ self._sc2_proc.close()
+ self._launch()
+ self.force_restarts += 1
+
+ def step(self, actions):
+ """A single environment step. Returns reward, terminated, info."""
+ actions_int = [int(a) for a in actions]
+
+ self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
+
+ # Collect individual actions
+ sc_actions = []
+ if self.debug:
+ logging.debug("Actions".center(60, "-"))
+
+ for a_id, action in enumerate(actions_int):
+ if not self.heuristic_ai:
+ sc_action = self.get_agent_action(a_id, action)
+ else:
+ sc_action, action_num = self.get_agent_action_heuristic(
+ a_id, action
+ )
+ actions[a_id] = action_num
+ if sc_action:
+ sc_actions.append(sc_action)
+
+ # Send action request
+ req_actions = sc_pb.RequestAction(actions=sc_actions)
+ try:
+ self._controller.actions(req_actions)
+ # Make step in SC2, i.e. apply actions
+ self._controller.step(self._step_mul)
+ # Observe here so that we know if the episode is over.
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ return 0, True, {}
+
+ self._total_steps += 1
+ self._episode_steps += 1
+
+ # Update units
+ game_end_code = self.update_units()
+
+ terminated = False
+ reward = self.reward_battle()
+ info = {"battle_won": False}
+
+ # count units that are still alive
+ dead_allies, dead_enemies = 0, 0
+ for _al_id, al_unit in self.agents.items():
+ if al_unit.health == 0:
+ dead_allies += 1
+ for _e_id, e_unit in self.enemies.items():
+ if e_unit.health == 0:
+ dead_enemies += 1
+
+ info["dead_allies"] = dead_allies
+ info["dead_enemies"] = dead_enemies
+
+ if game_end_code is not None:
+ # Battle is over
+ terminated = True
+ self.battles_game += 1
+ if game_end_code == 1 and not self.win_counted:
+ self.battles_won += 1
+ self.win_counted = True
+ info["battle_won"] = True
+ if not self.reward_sparse:
+ reward += self.reward_win
+ else:
+ reward = 1
+ elif game_end_code == -1 and not self.defeat_counted:
+ self.defeat_counted = True
+ if not self.reward_sparse:
+ reward += self.reward_defeat
+ else:
+ reward = -1
+
+ elif self._episode_steps >= self.episode_limit:
+ # Episode limit reached
+ terminated = True
+ if self.continuing_episode:
+ info["episode_limit"] = True
+ self.battles_game += 1
+ self.timeouts += 1
+
+ if self.debug:
+ logging.debug("Reward = {}".format(reward).center(60, "-"))
+
+ if terminated:
+ self._episode_count += 1
+
+ if self.reward_scale:
+ reward /= self.max_reward / self.reward_scale_rate
+
+ self.reward = reward
+
+ return reward, terminated, info
+
+ def get_agent_action(self, a_id, action):
+ """Construct the action for agent a_id."""
+ avail_actions = self.get_avail_agent_actions(a_id)
+ assert (
+ avail_actions[action] == 1
+ ), "Agent {} cannot perform action {}".format(a_id, action)
+
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+ x = unit.pos.x
+ y = unit.pos.y
+
+ if action == 0:
+ # no-op (valid only when dead)
+ assert unit.health == 0, "No-op only available for dead agents."
+ if self.debug:
+ logging.debug("Agent {}: Dead".format(a_id))
+ return None
+ elif action == 1:
+ # stop
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["stop"],
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Stop".format(a_id))
+
+ elif action == 2:
+ # move north
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y + self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move North".format(a_id))
+
+ elif action == 3:
+ # move south
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y - self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move South".format(a_id))
+
+ elif action == 4:
+ # move east
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x + self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move East".format(a_id))
+
+ elif action == 5:
+ # move west
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x - self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move West".format(a_id))
+ else:
+ # attack/heal units that are in range
+ target_id = action - self.n_actions_no_attack
+ if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
+ target_unit = self.agents[target_id]
+ action_name = "heal"
+ else:
+ target_unit = self.enemies[target_id]
+ action_name = "attack"
+
+ action_id = actions[action_name]
+ target_tag = target_unit.tag
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s unit # {}".format(
+ a_id, action_name, target_id
+ )
+ )
+
+ sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
+ return sc_action
+
+ def get_agent_action_heuristic(self, a_id, action):
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+
+ target = self.heuristic_targets[a_id]
+ if unit.unit_type == self.medivac_id:
+ if (
+ target is None
+ or self.agents[target].health == 0
+ or self.agents[target].health == self.agents[target].health_max
+ ):
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for al_id, al_unit in self.agents.items():
+ if al_unit.unit_type == self.medivac_id:
+ continue
+ if (
+ al_unit.health != 0
+ and al_unit.health != al_unit.health_max
+ ):
+ dist = self.distance(
+ unit.pos.x,
+ unit.pos.y,
+ al_unit.pos.x,
+ al_unit.pos.y,
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = al_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["heal"]
+ target_tag = self.agents[self.heuristic_targets[a_id]].tag
+ else:
+ if target is None or self.enemies[target].health == 0:
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for e_id, e_unit in self.enemies.items():
+ if (
+ unit.unit_type == self.marauder_id
+ and e_unit.unit_type == self.medivac_id
+ ):
+ continue
+ if e_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, e_unit.pos.x, e_unit.pos.y
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = e_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["attack"]
+ target_tag = self.enemies[self.heuristic_targets[a_id]].tag
+
+ action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack
+
+ # Check if the action is available
+ if (
+ self.heuristic_rest
+ and self.get_avail_agent_actions(a_id)[action_num] == 0
+ ):
+
+ # Move towards the target rather than attacking/healing
+ if unit.unit_type == self.medivac_id:
+ target_unit = self.agents[self.heuristic_targets[a_id]]
+ else:
+ target_unit = self.enemies[self.heuristic_targets[a_id]]
+
+ delta_x = target_unit.pos.x - unit.pos.x
+ delta_y = target_unit.pos.y - unit.pos.y
+
+ if abs(delta_x) > abs(delta_y): # east or west
+ if delta_x > 0: # east
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x + self._move_amount, y=unit.pos.y
+ )
+ action_num = 4
+ else: # west
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x - self._move_amount, y=unit.pos.y
+ )
+ action_num = 5
+ else: # north or south
+ if delta_y > 0: # north
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y + self._move_amount
+ )
+ action_num = 2
+ else: # south
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y - self._move_amount
+ )
+ action_num = 3
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=target_pos,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ else:
+ # Attack/heal the target
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
+ return sc_action, action_num
+
+ def reward_battle(self):
+ """Reward function when self.reward_spare==False.
+ Returns accumulative hit/shield point damage dealt to the enemy
+ + reward_death_value per enemy unit killed, and, in case
+ self.reward_only_positive == False, - (damage dealt to ally units
+ + reward_death_value per ally unit killed) * self.reward_negative_scale
+ """
+ if self.reward_sparse:
+ return 0
+
+ reward = 0
+ delta_deaths = 0
+ delta_ally = 0
+ delta_enemy = 0
+
+ neg_scale = self.reward_negative_scale
+
+ # update deaths
+ for al_id, al_unit in self.agents.items():
+ if not self.death_tracker_ally[al_id]:
+ # did not die so far
+ prev_health = (
+ self.previous_ally_units[al_id].health
+ + self.previous_ally_units[al_id].shield
+ )
+ if al_unit.health == 0:
+ # just died
+ self.death_tracker_ally[al_id] = 1
+ if not self.reward_only_positive:
+ delta_deaths -= self.reward_death_value * neg_scale
+ delta_ally += prev_health * neg_scale
+ else:
+ # still alive
+ delta_ally += neg_scale * (
+ prev_health - al_unit.health - al_unit.shield
+ )
+
+ for e_id, e_unit in self.enemies.items():
+ if not self.death_tracker_enemy[e_id]:
+ prev_health = (
+ self.previous_enemy_units[e_id].health
+ + self.previous_enemy_units[e_id].shield
+ )
+ if e_unit.health == 0:
+ self.death_tracker_enemy[e_id] = 1
+ delta_deaths += self.reward_death_value
+ delta_enemy += prev_health
+ else:
+ delta_enemy += prev_health - e_unit.health - e_unit.shield
+
+ if self.reward_only_positive:
+ reward = abs(delta_enemy + delta_deaths) # shield regeneration
+ else:
+ reward = delta_enemy + delta_deaths - delta_ally
+
+ return reward
+
+ def get_total_actions(self):
+ """Returns the total number of actions an agent could ever take."""
+ return self.n_actions
+
+ @staticmethod
+ def distance(x1, y1, x2, y2):
+ """Distance between two points."""
+ return math.hypot(x2 - x1, y2 - y1)
+
+ def unit_shoot_range(self, agent_id):
+ """Returns the shooting range for an agent."""
+ return 6
+
+ def unit_sight_range(self, agent_id):
+ """Returns the sight range for an agent."""
+ return 9
+
+ def unit_max_cooldown(self, unit):
+ """Returns the maximal cooldown for a unit."""
+ switcher = {
+ self.marine_id: 15,
+ self.marauder_id: 25,
+ self.medivac_id: 200, # max energy
+ self.stalker_id: 35,
+ self.zealot_id: 22,
+ self.colossus_id: 24,
+ self.hydralisk_id: 10,
+ self.zergling_id: 11,
+ self.baneling_id: 1,
+ }
+ return switcher.get(unit.unit_type, 15)
+
+ def save_replay(self):
+ """Save a replay."""
+ prefix = self.replay_prefix or self.map_name
+ replay_dir = self.replay_dir or ""
+ replay_path = self._run_config.save_replay(
+ self._controller.save_replay(),
+ replay_dir=replay_dir,
+ prefix=prefix,
+ )
+ logging.info("Replay saved at: %s" % replay_path)
+
+ def unit_max_shield(self, unit):
+ """Returns maximal shield for a given unit."""
+ if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
+ return 80 # Protoss's Stalker
+ if unit.unit_type == 73 or unit.unit_type == self.zealot_id:
+            return 50  # Protoss's Zealot
+ if unit.unit_type == 4 or unit.unit_type == self.colossus_id:
+ return 150 # Protoss's Colossus
+
+ def can_move(self, unit, direction):
+ """Whether a unit can move in a given direction."""
+ m = self._move_amount / 2
+
+ if direction == Direction.NORTH:
+ x, y = int(unit.pos.x), int(unit.pos.y + m)
+ elif direction == Direction.SOUTH:
+ x, y = int(unit.pos.x), int(unit.pos.y - m)
+ elif direction == Direction.EAST:
+ x, y = int(unit.pos.x + m), int(unit.pos.y)
+ else:
+ x, y = int(unit.pos.x - m), int(unit.pos.y)
+
+ if self.check_bounds(x, y) and self.pathing_grid[x, y]:
+ return True
+
+ return False
+
+ def get_surrounding_points(self, unit, include_self=False):
+ """Returns the surrounding points of the unit in 8 directions."""
+ x = int(unit.pos.x)
+ y = int(unit.pos.y)
+
+ ma = self._move_amount
+
+ points = [
+ (x, y + 2 * ma),
+ (x, y - 2 * ma),
+ (x + 2 * ma, y),
+ (x - 2 * ma, y),
+ (x + ma, y + ma),
+ (x - ma, y - ma),
+ (x + ma, y - ma),
+ (x - ma, y + ma),
+ ]
+
+ if include_self:
+ points.append((x, y))
+
+ return points
+
+ def check_bounds(self, x, y):
+ """Whether a point is within the map bounds."""
+ return 0 <= x < self.map_x and 0 <= y < self.map_y
+
+ def get_surrounding_pathing(self, unit):
+ """Returns pathing values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=False)
+ vals = [
+ self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def get_surrounding_height(self, unit):
+ """Returns height values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=True)
+ vals = [
+ self.terrain_height[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def get_obs_agent(self, agent_id):
+ """Returns observation for agent_id. The observation is composed of:
+
+ - agent movement features (where it can move to, height information
+ and pathing grid)
+ - enemy features (available_to_attack, health, relative_x, relative_y,
+ shield, unit_type)
+ - ally features (visible, distance, relative_x, relative_y, shield,
+ unit_type)
+ - agent unit features (health, shield, unit_type)
+
+ All of this information is flattened and concatenated into a list,
+ in the aforementioned order. To know the sizes of each of the
+ features inside the final list of features, take a look at the
+ functions ``get_obs_move_feats_size()``,
+ ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and
+ ``get_obs_own_feats_size()``.
+
+ The size of the observation vector may vary, depending on the
+ environment configuration and type of units present in the map.
+        For instance, non-Protoss units will not have shields, movement
+        features may or may not include terrain height and pathing grid,
+        and unit_type is not included if there is only one type of unit on
+        the map.
+
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+ """
+ unit = self.get_unit_by_id(agent_id)
+
+ move_feats_dim = self.get_obs_move_feats_size()
+ enemy_feats_dim = self.get_obs_enemy_feats_size()
+ ally_feats_dim = self.get_obs_ally_feats_size()
+ own_feats_dim = self.get_obs_own_feats_size()
+
+ move_feats = np.zeros(move_feats_dim, dtype=np.float32)
+ enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)
+ ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)
+ own_feats = np.zeros(own_feats_dim, dtype=np.float32)
+
+ if unit.health > 0: # otherwise dead, return all zeros
+ x = unit.pos.x
+ y = unit.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Movement features
+ avail_actions = self.get_avail_agent_actions(agent_id)
+ for m in range(self.n_actions_move):
+ move_feats[m] = avail_actions[m + 2]
+
+ ind = self.n_actions_move
+
+ if self.obs_pathing_grid:
+ move_feats[
+ ind : ind + self.n_obs_pathing # noqa
+ ] = self.get_surrounding_pathing(unit)
+ ind += self.n_obs_pathing
+
+ if self.obs_terrain_height:
+ move_feats[ind:] = self.get_surrounding_height(unit)
+
+ # Enemy features
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+
+ if (
+ dist < sight_range and e_unit.health > 0
+ ): # visible and alive
+ # Sight range > shoot range
+ enemy_feats[e_id, 0] = avail_actions[
+ self.n_actions_no_attack + e_id
+ ] # available
+ enemy_feats[e_id, 1] = dist / sight_range # distance
+ enemy_feats[e_id, 2] = (
+ e_x - x
+ ) / sight_range # relative X
+ enemy_feats[e_id, 3] = (
+ e_y - y
+ ) / sight_range # relative Y
+
+ ind = 4
+ if self.obs_all_health:
+ enemy_feats[e_id, ind] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ ind += 1
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_feats[e_id, ind] = (
+ e_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_feats[e_id, ind + type_id] = 1 # unit type
+
+ # Ally features
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id != agent_id
+ ]
+ for i, al_id in enumerate(al_ids):
+
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+
+ if (
+ dist < sight_range and al_unit.health > 0
+ ): # visible and alive
+ ally_feats[i, 0] = 1 # visible
+ ally_feats[i, 1] = dist / sight_range # distance
+ ally_feats[i, 2] = (al_x - x) / sight_range # relative X
+ ally_feats[i, 3] = (al_y - y) / sight_range # relative Y
+
+ ind = 4
+ if self.obs_all_health:
+ ally_feats[i, ind] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_feats[i, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_feats[i, ind + type_id] = 1
+ ind += self.unit_type_bits
+
+ if self.obs_last_action:
+ ally_feats[i, ind:] = self.last_action[al_id]
+
+ # Own features
+ ind = 0
+ if self.obs_own_health:
+ own_feats[ind] = unit.health / unit.health_max
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(unit)
+ own_feats[ind] = unit.shield / max_shield
+ ind += 1
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ own_feats[ind + type_id] = 1
+
+ agent_obs = np.concatenate(
+ (
+ move_feats.flatten(),
+ enemy_feats.flatten(),
+ ally_feats.flatten(),
+ own_feats.flatten(),
+ )
+ )
+
+ if self.obs_timestep_number:
+ agent_obs = np.append(
+ agent_obs, self._episode_steps / self.episode_limit
+ )
+
+ if self.debug:
+ logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
+ logging.debug(
+ "Avail. actions {}".format(
+ self.get_avail_agent_actions(agent_id)
+ )
+ )
+ logging.debug("Move feats {}".format(move_feats))
+ logging.debug("Enemy feats {}".format(enemy_feats))
+ logging.debug("Ally feats {}".format(ally_feats))
+ logging.debug("Own feats {}".format(own_feats))
+
+ return agent_obs
+
+ def get_obs(self):
+ """Returns all agent observations in a list.
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+ """
+ agents_obs = [self.get_obs_agent(i) for i in range(self.n_agents)]
+ return agents_obs
+
+ def get_state(self):
+ """Returns the global state.
+        NOTE: This function should not be used during decentralised execution.
+ """
+ if self.obs_instead_of_state:
+ obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
+ np.float32
+ )
+ return obs_concat
+
+ state_dict = self.get_state_dict()
+
+ state = np.append(
+ state_dict["allies"].flatten(), state_dict["enemies"].flatten()
+ )
+ if "last_action" in state_dict:
+ state = np.append(state, state_dict["last_action"].flatten())
+ if "timestep" in state_dict:
+ state = np.append(state, state_dict["timestep"])
+
+ state = state.astype(dtype=np.float32)
+
+ if self.debug:
+ logging.debug("STATE".center(60, "-"))
+ logging.debug("Ally state {}".format(state_dict["allies"]))
+ logging.debug("Enemy state {}".format(state_dict["enemies"]))
+ if self.state_last_action:
+ logging.debug("Last actions {}".format(self.last_action))
+
+ return state
+
+ def get_ally_num_attributes(self):
+ return len(self.ally_state_attr_names)
+
+ def get_enemy_num_attributes(self):
+ return len(self.enemy_state_attr_names)
+
+ def get_state_dict(self):
+ """Returns the global state as a dictionary.
+
+ - allies: numpy array containing agents and their attributes
+ - enemies: numpy array containing enemies and their attributes
+ - last_action: numpy array of previous actions for each agent
+ - timestep: current no. of steps divided by total no. of steps
+
+ NOTE: This function should not be used during decentralised execution.
+ """
+
+ # number of features equals the number of attribute names
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ ally_state = np.zeros((self.n_agents, nf_al))
+ enemy_state = np.zeros((self.n_enemies, nf_en))
+
+ center_x = self.map_x / 2
+ center_y = self.map_y / 2
+
+ for al_id, al_unit in self.agents.items():
+ if al_unit.health > 0:
+ x = al_unit.pos.x
+ y = al_unit.pos.y
+ max_cd = self.unit_max_cooldown(al_unit)
+
+ ally_state[al_id, 0] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ if (
+ self.map_type == "MMM"
+ and al_unit.unit_type == self.medivac_id
+ ):
+ ally_state[al_id, 1] = al_unit.energy / max_cd # energy
+ else:
+ ally_state[al_id, 1] = (
+ al_unit.weapon_cooldown / max_cd
+ ) # cooldown
+ ally_state[al_id, 2] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ ally_state[al_id, 3] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_state[al_id, 4] = (
+ al_unit.shield / max_shield
+ ) # shield
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_state[al_id, type_id - self.unit_type_bits] = 1
+
+ for e_id, e_unit in self.enemies.items():
+ if e_unit.health > 0:
+ x = e_unit.pos.x
+ y = e_unit.pos.y
+
+ enemy_state[e_id, 0] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ enemy_state[e_id, 1] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ enemy_state[e_id, 2] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_state[e_id, 3] = e_unit.shield / max_shield # shield
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_state[e_id, type_id - self.unit_type_bits] = 1
+
+ state = {"allies": ally_state, "enemies": enemy_state}
+
+ if self.state_last_action:
+ state["last_action"] = self.last_action
+ if self.state_timestep_number:
+ state["timestep"] = self._episode_steps / self.episode_limit
+
+ return state
+
+ def get_obs_enemy_feats_size(self):
+ """Returns the dimensions of the matrix containing enemy features.
+ Size is n_enemies x n_features.
+ """
+ nf_en = 4 + self.unit_type_bits
+
+ if self.obs_all_health:
+ nf_en += 1 + self.shield_bits_enemy
+
+ return self.n_enemies, nf_en
+
+ def get_obs_ally_feats_size(self):
+ """Returns the dimensions of the matrix containing ally features.
+ Size is n_allies x n_features.
+ """
+ nf_al = 4 + self.unit_type_bits
+
+ if self.obs_all_health:
+ nf_al += 1 + self.shield_bits_ally
+
+ if self.obs_last_action:
+ nf_al += self.n_actions
+
+ return self.n_agents - 1, nf_al
+
+ def get_obs_own_feats_size(self):
+ """
+ Returns the size of the vector containing the agents' own features.
+ """
+ own_feats = self.unit_type_bits
+ if self.obs_own_health:
+ own_feats += 1 + self.shield_bits_ally
+ if self.obs_timestep_number:
+ own_feats += 1
+
+ return own_feats
+
+ def get_obs_move_feats_size(self):
+ """Returns the size of the vector containing the agents's movement-
+ related features.
+ """
+ move_feats = self.n_actions_move
+ if self.obs_pathing_grid:
+ move_feats += self.n_obs_pathing
+ if self.obs_terrain_height:
+ move_feats += self.n_obs_height
+
+ return move_feats
+
+ def get_obs_size(self):
+ """Returns the size of the observation."""
+ own_feats = self.get_obs_own_feats_size()
+ move_feats = self.get_obs_move_feats_size()
+
+ n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size()
+ n_allies, n_ally_feats = self.get_obs_ally_feats_size()
+
+ enemy_feats = n_enemies * n_enemy_feats
+ ally_feats = n_allies * n_ally_feats
+
+ return move_feats + enemy_feats + ally_feats + own_feats
+
+ def get_state_size(self):
+ """Returns the size of the global state."""
+ if self.obs_instead_of_state:
+ return self.get_obs_size() * self.n_agents
+
+ nf_al = 4 + self.shield_bits_ally + self.unit_type_bits
+ nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits
+
+ enemy_state = self.n_enemies * nf_en
+ ally_state = self.n_agents * nf_al
+
+ size = enemy_state + ally_state
+
+ if self.state_last_action:
+ size += self.n_agents * self.n_actions
+ if self.state_timestep_number:
+ size += 1
+
+ return size
+
+ def get_visibility_matrix(self):
+ """Returns a boolean numpy array of dimensions
+ (n_agents, n_agents + n_enemies) indicating which units
+ are visible to each agent.
+ """
+ arr = np.zeros(
+ (self.n_agents, self.n_agents + self.n_enemies),
+            dtype=bool,
+ )
+
+ for agent_id in range(self.n_agents):
+ current_agent = self.get_unit_by_id(agent_id)
+            if current_agent.health > 0:  # if agent is not dead
+ x = current_agent.pos.x
+ y = current_agent.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Enemies
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+
+ if dist < sight_range and e_unit.health > 0:
+ # visible and alive
+ arr[agent_id, self.n_agents + e_id] = 1
+
+ # The matrix for allies is filled symmetrically
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id > agent_id
+ ]
+ for _, al_id in enumerate(al_ids):
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+
+ if dist < sight_range and al_unit.health > 0:
+ # visible and alive
+ arr[agent_id, al_id] = arr[al_id, agent_id] = 1
+
+ return arr
+
+ def get_unit_type_id(self, unit, ally):
+ """Returns the ID of unit type in the given scenario."""
+ if ally: # use new SC2 unit types
+ type_id = unit.unit_type - self._min_unit_type
+ else: # use default SC2 unit types
+ if self.map_type == "stalkers_and_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73
+ type_id = unit.unit_type - 73
+ elif self.map_type == "colossi_stalkers_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4
+ if unit.unit_type == 4:
+ type_id = 0
+ elif unit.unit_type == 74:
+ type_id = 1
+ else:
+ type_id = 2
+ elif self.map_type == "bane":
+ if unit.unit_type == 9:
+ type_id = 0
+ else:
+ type_id = 1
+ elif self.map_type == "MMM":
+ if unit.unit_type == 51:
+ type_id = 0
+ elif unit.unit_type == 48:
+ type_id = 1
+ else:
+ type_id = 2
+
+ return type_id
+
+ def get_avail_agent_actions(self, agent_id):
+ """Returns the available actions for agent_id."""
+ unit = self.get_unit_by_id(agent_id)
+ if unit.health > 0:
+ # cannot choose no-op when alive
+ avail_actions = [0] * self.n_actions
+
+ # stop should be allowed
+ avail_actions[1] = 1
+
+ # see if we can move
+ if self.can_move(unit, Direction.NORTH):
+ avail_actions[2] = 1
+ if self.can_move(unit, Direction.SOUTH):
+ avail_actions[3] = 1
+ if self.can_move(unit, Direction.EAST):
+ avail_actions[4] = 1
+ if self.can_move(unit, Direction.WEST):
+ avail_actions[5] = 1
+
+            # Can attack only units that are alive and within shooting range
+ shoot_range = self.unit_shoot_range(agent_id)
+
+ target_items = self.enemies.items()
+ if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
+ # Medivacs cannot heal themselves or other flying units
+ target_items = [
+ (t_id, t_unit)
+ for (t_id, t_unit) in self.agents.items()
+ if t_unit.unit_type != self.medivac_id
+ ]
+
+ for t_id, t_unit in target_items:
+ if t_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
+ )
+ if dist <= shoot_range:
+ avail_actions[t_id + self.n_actions_no_attack] = 1
+
+ return avail_actions
+
+ else:
+ # only no-op allowed
+ return [1] + [0] * (self.n_actions - 1)
+
+ def get_avail_actions(self):
+ """Returns the available actions of all agents in a list."""
+ avail_actions = []
+ for agent_id in range(self.n_agents):
+ avail_agent = self.get_avail_agent_actions(agent_id)
+ avail_actions.append(avail_agent)
+ return avail_actions
+
+ def close(self):
+ """Close StarCraft II."""
+ if self.renderer is not None:
+ self.renderer.close()
+ self.renderer = None
+ if self._sc2_proc:
+ self._sc2_proc.close()
+
+ def seed(self):
+ """Returns the random seed used by the environment."""
+ return self._seed
+
+ def render(self, mode="human"):
+ if self.renderer is None:
+ from smac.env.starcraft2.render import StarCraft2Renderer
+
+ self.renderer = StarCraft2Renderer(self, mode)
+ assert (
+ mode == self.renderer.mode
+ ), "mode must be consistent across render calls"
+ return self.renderer.render(mode)
+
+ def _kill_all_units(self):
+ """Kill all units on the map."""
+ units_alive = [
+ unit.tag for unit in self.agents.values() if unit.health > 0
+ ] + [unit.tag for unit in self.enemies.values() if unit.health > 0]
+ debug_command = [
+ d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=units_alive))
+ ]
+ self._controller.debug(debug_command)
+
+ def init_units(self):
+ """Initialise the units."""
+ while True:
+ # Sometimes not all units have yet been created by SC2
+ self.agents = {}
+ self.enemies = {}
+
+ ally_units = [
+ unit
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 1
+ ]
+ ally_units_sorted = sorted(
+ ally_units,
+ key=attrgetter("unit_type", "pos.x", "pos.y"),
+ reverse=False,
+ )
+
+ for i in range(len(ally_units_sorted)):
+ self.agents[i] = ally_units_sorted[i]
+ if self.debug:
+ logging.debug(
+ "Unit {} is {}, x = {}, y = {}".format(
+ len(self.agents),
+ self.agents[i].unit_type,
+ self.agents[i].pos.x,
+ self.agents[i].pos.y,
+ )
+ )
+
+ for unit in self._obs.observation.raw_data.units:
+ if unit.owner == 2:
+ self.enemies[len(self.enemies)] = unit
+ if self._episode_count == 0:
+ self.max_reward += unit.health_max + unit.shield_max
+
+ if self._episode_count == 0:
+ min_unit_type = min(
+ unit.unit_type for unit in self.agents.values()
+ )
+ self._init_ally_unit_types(min_unit_type)
+
+ all_agents_created = len(self.agents) == self.n_agents
+ all_enemies_created = len(self.enemies) == self.n_enemies
+
+ self._unit_types = [
+ unit.unit_type for unit in ally_units_sorted
+ ] + [
+ unit.unit_type
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 2
+ ]
+
+ if all_agents_created and all_enemies_created: # all good
+ return
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset()
+
+ def get_unit_types(self):
+ if self._unit_types is None:
+ warn(
+ "unit types have not been initialized yet, please call"
+ "env.reset() to populate this and call t1286he method again."
+ )
+
+ return self._unit_types
+
+ def update_units(self):
+ """Update units after an environment step.
+ This function assumes that self._obs is up-to-date.
+ """
+ n_ally_alive = 0
+ n_enemy_alive = 0
+
+ # Store previous state
+ self.previous_ally_units = deepcopy(self.agents)
+ self.previous_enemy_units = deepcopy(self.enemies)
+
+ for al_id, al_unit in self.agents.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if al_unit.tag == unit.tag:
+ self.agents[al_id] = unit
+ updated = True
+ n_ally_alive += 1
+ break
+
+ if not updated: # dead
+ al_unit.health = 0
+
+ for e_id, e_unit in self.enemies.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if e_unit.tag == unit.tag:
+ self.enemies[e_id] = unit
+ updated = True
+ n_enemy_alive += 1
+ break
+
+ if not updated: # dead
+ e_unit.health = 0
+
+ if (
+ n_ally_alive == 0
+ and n_enemy_alive > 0
+ or self.only_medivac_left(ally=True)
+ ):
+ return -1 # lost
+ if (
+ n_ally_alive > 0
+ and n_enemy_alive == 0
+ or self.only_medivac_left(ally=False)
+ ):
+ return 1 # won
+ if n_ally_alive == 0 and n_enemy_alive == 0:
+ return 0
+
+ return None
+
+ def _init_ally_unit_types(self, min_unit_type):
+ """Initialise ally unit types. Should be called once from the
+ init_units function.
+ """
+ self._min_unit_type = min_unit_type
+ if self.map_type == "marines":
+ self.marine_id = min_unit_type
+ elif self.map_type == "stalkers_and_zealots":
+ self.stalker_id = min_unit_type
+ self.zealot_id = min_unit_type + 1
+ elif self.map_type == "colossi_stalkers_zealots":
+ self.colossus_id = min_unit_type
+ self.stalker_id = min_unit_type + 1
+ self.zealot_id = min_unit_type + 2
+ elif self.map_type == "MMM":
+ self.marauder_id = min_unit_type
+ self.marine_id = min_unit_type + 1
+ self.medivac_id = min_unit_type + 2
+ elif self.map_type == "zealots":
+ self.zealot_id = min_unit_type
+ elif self.map_type == "hydralisks":
+ self.hydralisk_id = min_unit_type
+ elif self.map_type == "stalkers":
+ self.stalker_id = min_unit_type
+ elif self.map_type == "colossus":
+ self.colossus_id = min_unit_type
+ elif self.map_type == "bane":
+ self.baneling_id = min_unit_type
+ self.zergling_id = min_unit_type + 1
+
+ def only_medivac_left(self, ally):
+ """Check if only Medivac units are left."""
+ if self.map_type != "MMM":
+ return False
+
+ if ally:
+ units_alive = [
+ a
+ for a in self.agents.values()
+ if (a.health > 0 and a.unit_type != self.medivac_id)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+ else:
+ units_alive = [
+ a
+ for a in self.enemies.values()
+ if (a.health > 0 and a.unit_type != self.medivac_id)
+ ]
+ if len(units_alive) == 1 and units_alive[0].unit_type == 54:
+ return True
+ return False
+
+ def get_unit_by_id(self, a_id):
+ """Get unit by ID."""
+ return self.agents[a_id]
+
+ def get_stats(self):
+ stats = {
+ "battles_won": self.battles_won,
+ "battles_game": self.battles_game,
+ "battles_draw": self.timeouts,
+ "win_rate": self.battles_won / self.battles_game,
+ "timeouts": self.timeouts,
+ "restarts": self.force_restarts,
+ }
+ return stats
+
+ def get_env_info(self):
+ env_info = super().get_env_info()
+ env_info["agent_features"] = self.ally_state_attr_names
+ env_info["enemy_features"] = self.enemy_state_attr_names
+ return env_info
\ No newline at end of file
diff --git a/src/envs/smac_v2/README_tju.md b/src/envs/smac_v2/README_tju.md
new file mode 100644
index 0000000..e59ee40
--- /dev/null
+++ b/src/envs/smac_v2/README_tju.md
@@ -0,0 +1,5 @@
+
+# Instructions
+
+## Firstly, install SMACv2 according to https://github.com/oxwhirl/smacv2
+* Note: make sure to put the maps in `smac/env/starcraft2/maps/SMAC_Maps` to
diff --git a/src/envs/smac_v2/StarCraft2Env2Wrapper.py b/src/envs/smac_v2/StarCraft2Env2Wrapper.py
new file mode 100644
index 0000000..21fbf21
--- /dev/null
+++ b/src/envs/smac_v2/StarCraft2Env2Wrapper.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+'''
+@Project :API-Network
+@File :StarCraft2EnvWrapper.py
+@Author :Hao Xiaotian
+@Date :2022/6/13 16:26
+'''
+
+from .official.wrapper import StarCraftCapabilityEnvWrapper
+
+
+class StarCraft2Env2Wrapper(StarCraftCapabilityEnvWrapper):
+
+ # Add new functions to support permutation operation
+ def get_obs_component(self):
+ move_feats_dim = self.env.get_obs_move_feats_size()
+ enemy_feats_dim = self.env.get_obs_enemy_feats_size()
+ ally_feats_dim = self.env.get_obs_ally_feats_size()
+ own_feats_dim = self.env.get_obs_own_feats_size()
+ obs_component = [move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim]
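+        # Assuming the SMACv2 env mirrors the SMAC helpers earlier in this
+        # diff, enemy_feats_dim and ally_feats_dim are (count, n_features)
+        # tuples while move_feats_dim and own_feats_dim are flat sizes.
+        # Sketch of splitting a flat per-agent observation back into parts:
+        #   sizes = [move_feats_dim, np.prod(enemy_feats_dim),
+        #            np.prod(ally_feats_dim), own_feats_dim]
+        #   parts = np.split(obs_i, np.cumsum(sizes)[:-1])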
+ return obs_component
+
+ def get_state_component(self):
+ if self.env.obs_instead_of_state:
+ return [self.env.get_obs_size()] * self.env.n_agents
+
+ nf_al = self.env.get_ally_num_attributes()
+ nf_en = self.env.get_enemy_num_attributes()
+
+ enemy_state = self.env.n_enemies * nf_en
+ ally_state = self.env.n_agents * nf_al
+
+ size = [ally_state, enemy_state]
+
+ if self.env.state_last_action:
+ size.append(self.env.n_agents * self.env.n_actions)
+ if self.env.state_timestep_number:
+ size.append(1)
+ return size
+
+ def get_env_info(self):
+ env_info = {
+ "state_shape": self.get_state_size(),
+ "obs_shape": self.get_obs_size(),
+ "n_actions": self.get_total_actions(),
+ "n_agents": self.env.n_agents,
+ "n_enemies": self.env.n_enemies,
+ "episode_limit": self.env.episode_limit,
+
+ # New features we added.
+ "n_normal_actions": self.env.n_actions_no_attack,
+ "n_allies": self.env.n_agents - 1,
+ "state_ally_feats_size": self.env.get_ally_num_attributes(),
+ "state_enemy_feats_size": self.env.get_enemy_num_attributes(),
+ "obs_component": self.get_obs_component(),
+ "state_component": self.get_state_component(),
+ "map_type": self.env.map_type,
+ }
+ print(env_info)
+ return env_info
+
+ def _get_medivac_ids(self):
+ medivac_ids = []
+ for al_id, al_unit in self.env.agents.items():
+ if self.env.map_type == "MMM" and al_unit.unit_type == self.env.medivac_id:
+ medivac_ids.append(al_id)
+ print(medivac_ids) # [9]
+ return medivac_ids
+
+ # def reward_battle(self):
+    #     """Reward function when self.reward_sparse == False.
+ #
+ # Fix the **REWARD FUNCTION BUG** of the original starcraft2.py.
+ #
+ # We carefully check the code and indeed find some code error in starcraft2.py.
+ # The error is caused by the incorrect reward calculation for the shield regeneration process and this error will
+ # only occur for scenarios where the enemies are Protoss units.
+ #
+    # (1) At line 717 of reward_battle() of starcraft2.py, the reward is computed as: reward = abs(delta_enemy + delta_deaths).
+    # Normally, when the agents attack the enemies, delta_enemy will be > 0 and thus the agents will be rewarded for attacking enemies.
+    #
+    # (2) For Protoss enemies, delta_enemy can be < 0 due to shield regeneration. However, because abs() is taken over the sum,
+    # the agents will still be rewarded when the enemies' shields regenerate. This incorrect reward can lead to undesired behaviors,
+    # e.g., attacking the enemies without killing them and waiting for their shields to regenerate.
+    #
+    # (3) Due to the PI/PE design and the improved representational capacity, HPN-QMIX is more sensitive to such
+    # incorrect rewards and sometimes learns strange behaviors.
+ #
+ # Returns accumulative hit/shield point damage dealt to the enemy
+ # + reward_death_value per enemy unit killed, and, in case
+ # self.reward_only_positive == False, - (damage dealt to ally units
+ # + reward_death_value per ally unit killed) * self.reward_negative_scale
+ # """
+ # if self.reward_sparse:
+ # return 0
+ #
+ # reward = 0
+ # delta_deaths = 0
+ # delta_ally = 0
+ # delta_enemy = 0
+ #
+ # neg_scale = self.reward_negative_scale
+ #
+ # # update deaths
+ # for al_id, al_unit in self.agents.items():
+ # if not self.death_tracker_ally[al_id]:
+ # # did not die so far
+ # prev_health = (
+ # self.previous_ally_units[al_id].health
+ # + self.previous_ally_units[al_id].shield
+ # )
+ # if al_unit.health == 0:
+ # # just died
+ # self.death_tracker_ally[al_id] = 1
+ # if not self.reward_only_positive:
+ # delta_deaths -= self.reward_death_value * neg_scale
+ # delta_ally += prev_health * neg_scale
+ # else:
+ # # still alive
+ # delta_ally += neg_scale * (
+ # prev_health - al_unit.health - al_unit.shield
+ # )
+ #
+ # for e_id, e_unit in self.enemies.items():
+ # if not self.death_tracker_enemy[e_id]:
+ # prev_health = (
+ # self.previous_enemy_units[e_id].health
+ # + self.previous_enemy_units[e_id].shield
+ # )
+ # if e_unit.health == 0:
+ # self.death_tracker_enemy[e_id] = 1
+ # delta_deaths += self.reward_death_value
+ # delta_enemy += prev_health
+ # else:
+ # delta_enemy += prev_health - e_unit.health - e_unit.shield
+ #
+ # if self.reward_only_positive:
+ # ###### reward = abs(delta_enemy + delta_deaths) # shield regeneration (the original wrong implementation)
+ # # reward = max(delta_enemy, 0) + delta_deaths # only consider the shield damage
+ # reward = delta_enemy + delta_deaths # consider the `+shield-damage` and the `-shield-regeneration`
+ # else:
+ # reward = delta_enemy + delta_deaths - delta_ally
+ #
+ # return reward
diff --git a/src/envs/smac_v2/__init__.py b/src/envs/smac_v2/__init__.py
new file mode 100644
index 0000000..72364d0
--- /dev/null
+++ b/src/envs/smac_v2/__init__.py
@@ -0,0 +1 @@
+from .StarCraft2Env2Wrapper import StarCraft2Env2Wrapper
\ No newline at end of file
diff --git a/src/envs/smac_v2/official/__init__.py b/src/envs/smac_v2/official/__init__.py
new file mode 100644
index 0000000..4e7fdbf
--- /dev/null
+++ b/src/envs/smac_v2/official/__init__.py
@@ -0,0 +1,4 @@
+from absl import flags
+
+FLAGS = flags.FLAGS
+FLAGS(["main.py"])
diff --git a/src/envs/smac_v2/official/distributions.py b/src/envs/smac_v2/official/distributions.py
new file mode 100644
index 0000000..9d08f82
--- /dev/null
+++ b/src/envs/smac_v2/official/distributions.py
@@ -0,0 +1,329 @@
+from abc import ABC, abstractmethod, abstractproperty
+from copy import deepcopy
+from typing import Any, Dict
+from itertools import combinations_with_replacement
+from random import choice, shuffle
+from math import inf
+from numpy.random import default_rng
+import numpy as np
+
+
+class Distribution(ABC):
+ @abstractmethod
+ def generate(self) -> Dict[str, Any]:
+ pass
+
+ @property
+ @abstractproperty
+ def n_tasks(self) -> int:
+ pass
+
+
+DISTRIBUTION_MAP = {}
+
+
+def get_distribution(key):
+ return DISTRIBUTION_MAP[key]
+
+
+def register_distribution(key, cls):
+ DISTRIBUTION_MAP[key] = cls
+
+
+class FixedDistribution(Distribution):
+ """A generic disribution that draws from a fixed list.
+ May operate in test mode, where items are drawn sequentially,
+ or train mode where items are drawn randomly. Example uses of this
+ are for team generation or per-agent accuracy generation in SMAC by
+ drawing from separate fixed lists at test and train time.
+ """
+
+ def __init__(self, config):
+ """
+ Args:
+ config (dict): Must contain `env_key`, `test_mode` and `items`
+ entries. `env_key` is the key to pass to the environment so that it
+ recognises what to do with the list. `test_mode` controls the sampling
+ behaviour (sequential if true, uniform at random if false), `items`
+ is the list of items (team configurations/accuracies etc.) to sample from.
+ """
+ self.config = config
+ self.env_key = config["env_key"]
+ self.test_mode = config["test_mode"]
+ self.teams = config["items"]
+ self.index = 0
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ """Returns:
+ Dict: Returns a dict of the form
+ {self.env_key: {"item": - , "id": }}
+ """
+ if self.test_mode:
+ team = self.teams[self.index]
+ team_id = self.index
+ self.index = (self.index + 1) % len(self.teams)
+ shuffle(team)
+ return {self.env_key: {"item": team, "id": team_id}}
+ else:
+ team = choice(self.teams)
+ team_id = self.teams.index(team)
+ shuffle(team)
+ return {self.env_key: {"item": team, "id": team_id}}
+
+ @property
+ def n_tasks(self):
+ return len(self.teams)
+
+
+register_distribution("fixed", FixedDistribution)
+
+
+class AllTeamsDistribution(Distribution):
+ def __init__(self, config):
+ self.config = config
+ self.units = config["unit_types"]
+ self.n_units = config["n_units"]
+ self.exceptions = config.get("exception_unit_types", [])
+ self.combinations = list(
+ combinations_with_replacement(self.units, self.n_units)
+ )
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ team = []
+ while not team or all(member in self.exceptions for member in team):
+ team = list(choice(self.combinations))
+ team_id = self.combinations.index(tuple(team))
+ shuffle(team)
+ return {"team_gen": {"item": team, "id": team_id}}
+
+ @property
+ def n_tasks(self):
+ # TODO adjust so that this can handle exceptions
+ assert not self.exceptions
+ return len(self.combinations)
+
+
+register_distribution("all_teams", AllTeamsDistribution)
+
+
+class WeightedTeamsDistribution(Distribution):
+ def __init__(self, config):
+ self.config = config
+ self.units = np.array(config["unit_types"])
+ self.n_units = config["n_units"]
+ self.weights = np.array(config["weights"])
+ self.exceptions = config.get("exception_unit_types", set())
+ self.rng = default_rng()
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ team = []
+ while not team or all(member in self.exceptions for member in team):
+ team = list(
+ self.rng.choice(
+ self.units, size=(self.n_units,), p=self.weights
+ )
+ )
+ shuffle(team)
+ return {"team_gen": {"item": team, "id": 0}}
+
+ @property
+ def n_tasks(self):
+ return inf
+
+
+register_distribution("weighted_teams", WeightedTeamsDistribution)
+
+
+class PerAgentUniformDistribution(Distribution):
+ """A generic distribution for generating some information per-agent drawn
+ from a uniform distribution in a specified range.
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.lower_bound = config["lower_bound"]
+ self.upper_bound = config["upper_bound"]
+ self.env_key = config["env_key"]
+ self.n_units = config["n_units"]
+ self.rng = default_rng()
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ probs = self.rng.uniform(
+ low=self.lower_bound,
+ high=self.upper_bound,
+ size=(self.n_units, len(self.lower_bound)),
+ )
+ return {self.env_key: {"item": probs, "id": 0}}
+
+ @property
+ def n_tasks(self):
+ return inf
+
+
+register_distribution("per_agent_uniform", PerAgentUniformDistribution)
+
+
+class MaskDistribution(Distribution):
+ def __init__(self, config: Dict[str, Any]):
+ self.config = config
+ self.mask_probability = config["mask_probability"]
+ self.n_units = config["n_units"]
+ self.n_enemies = config["n_enemies"]
+ self.rng = default_rng()
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ mask = self.rng.choice(
+ [0, 1],
+ size=(self.n_units, self.n_enemies),
+ p=[
+ self.mask_probability,
+ 1.0 - self.mask_probability,
+ ],
+ )
+ return {"enemy_mask": {"item": mask, "id": 0}}
+
+ @property
+ def n_tasks(self):
+ return inf
+
+
+register_distribution("mask", MaskDistribution)
+
+
+class ReflectPositionDistribution(Distribution):
+ """Distribution that will generate enemy and ally
+ positions. Generates ally positions uniformly at
+ random and then reflects these in a vertical line
+ half-way across the map to get the enemy positions.
+ Only works when the number of agents and enemies is the same.
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.n_units = config["n_units"]
+ self.map_x = config["map_x"]
+ self.map_y = config["map_y"]
+ config_copy = deepcopy(config)
+ config_copy["env_key"] = "ally_start_positions"
+ config_copy["lower_bound"] = (0, 0)
+ # subtract one from the x coordinate because SC2 goes wrong
+ # when you spawn ally and enemy units on top of one another
+ # -1 gives a sensible 'buffer zone' of size 2
+ config_copy["upper_bound"] = (self.map_x / 2 - 1, self.map_y)
+ self.pos_generator = PerAgentUniformDistribution(config_copy)
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ ally_positions_dict = self.pos_generator.generate()
+ ally_positions = ally_positions_dict["ally_start_positions"]["item"]
+ enemy_positions = np.zeros_like(ally_positions)
+ enemy_positions[:, 0] = self.map_x - ally_positions[:, 0]
+ enemy_positions[:, 1] = ally_positions[:, 1]
+ return {
+ "ally_start_positions": {"item": ally_positions, "id": 0},
+ "enemy_start_positions": {"item": enemy_positions, "id": 0},
+ }
+
+ @property
+ def n_tasks(self) -> int:
+ return inf
+
+
+register_distribution("reflect_position", ReflectPositionDistribution)
+
+
+class SurroundedPositionDistribution(Distribution):
+ """Distribution that generates ally positions in a
+ circle at the centre of the map, and then has enemies
+ randomly distributed in the four diagonal directions at a
+ random distance.
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.n_units = config["n_units"]
+ self.n_enemies = config["n_enemies"]
+ self.map_x = config["map_x"]
+ self.map_y = config["map_y"]
+ self.rng = default_rng()
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ # need multiple centre points because SC2 does not cope with
+ # spawning ally and enemy units on top of one another in some
+ # cases
+ offset = 2
+ centre_point = np.array([self.map_x / 2, self.map_y / 2])
+ diagonal_to_centre_point = {
+ 0: np.array([self.map_x / 2 - offset, self.map_y / 2 - offset]),
+ 1: np.array([self.map_x / 2 - offset, self.map_y / 2 + offset]),
+ 2: np.array([self.map_x / 2 + offset, self.map_y / 2 + offset]),
+ 3: np.array([self.map_x / 2 + offset, self.map_y / 2 - offset]),
+ }
+ ally_position = np.tile(centre_point, (self.n_units, 1))
+ enemy_position = np.zeros((self.n_enemies, 2))
+ # decide on the number of groups (between 1 and 4)
+ n_groups = self.rng.integers(1, 5)
+ # generate the number of enemies in each group
+ group_membership = self.rng.multinomial(
+ self.n_enemies, np.ones(n_groups) / n_groups
+ )
+ # decide on the distance along the diagonal for each group
+ group_position = self.rng.uniform(size=(n_groups,))
+ group_diagonals = self.rng.choice(
+ np.array(range(4)), size=(n_groups,), replace=False
+ )
+
+ diagonal_to_point_map = {
+ 0: np.array([0, 0]),
+ 1: np.array([0, self.map_y]),
+ 2: np.array([self.map_x, self.map_y]),
+ 3: np.array([self.map_x, 0]),
+ }
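+        # Each enemy group is placed by linear interpolation between the
+        # offset centre point (t = 1) and the map corner on its diagonal
+        # (t = 0), where t is the group's sampled position.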
+ unit_index = 0
+ for i in range(n_groups):
+ t = group_position[i]
+ enemy_position[
+ unit_index : unit_index + group_membership[i], :
+ ] = diagonal_to_centre_point[
+ group_diagonals[i]
+ ] * t + diagonal_to_point_map[
+ group_diagonals[i]
+ ] * (
+ 1 - t
+ )
+
+ return {
+ "ally_start_positions": {"item": ally_position, "id": 0},
+ "enemy_start_positions": {"item": enemy_position, "id": 0},
+ }
+
+ @property
+ def n_tasks(self):
+ return inf
+
+
+register_distribution("surrounded", SurroundedPositionDistribution)
+
+# If this becomes common, we should find a more satisfying way
+# of doing this.
+class SurroundedAndReflectPositionDistribution(Distribution):
+ def __init__(self, config):
+ self.p_threshold = config["p"]
+ self.surrounded_distribution = SurroundedPositionDistribution(config)
+ self.reflect_distribution = ReflectPositionDistribution(config)
+ self.rng = default_rng()
+
+ def generate(self) -> Dict[str, Dict[str, Any]]:
+ p = self.rng.random()
+ if p > self.p_threshold:
+ return self.surrounded_distribution.generate()
+ else:
+ return self.reflect_distribution.generate()
+
+ @property
+ def n_tasks(self):
+ return inf
+
+
+register_distribution(
+ "surrounded_and_reflect", SurroundedAndReflectPositionDistribution
+)
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_empty.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_empty.SC2Map
new file mode 100644
index 0000000..f5bece9
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_empty.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_protoss.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_protoss.SC2Map
new file mode 100644
index 0000000..993909b
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_protoss.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_terran.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_terran.SC2Map
new file mode 100644
index 0000000..6cbc27e
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_terran.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_zerg.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_zerg.SC2Map
new file mode 100644
index 0000000..ca5db87
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/10gen_zerg.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/10m_vs_11m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/10m_vs_11m.SC2Map
new file mode 100644
index 0000000..1dc2286
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/10m_vs_11m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/1c3s5z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/1c3s5z.SC2Map
new file mode 100644
index 0000000..07dfe38
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/1c3s5z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/25m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/25m.SC2Map
new file mode 100644
index 0000000..fcfdeb0
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/25m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/27m_vs_30m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/27m_vs_30m.SC2Map
new file mode 100644
index 0000000..861c7f7
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/27m_vs_30m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map
new file mode 100644
index 0000000..b740b6c
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/2c_vs_64zg.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/2m_vs_1z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/2m_vs_1z.SC2Map
new file mode 100644
index 0000000..f4c05c4
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/2m_vs_1z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/2s3z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/2s3z.SC2Map
new file mode 100644
index 0000000..59846cc
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/2s3z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map
new file mode 100644
index 0000000..c03328d
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/2s_vs_1sc.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat.SC2Map
new file mode 100644
index 0000000..5c4dabf
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat_test.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat_test.SC2Map
new file mode 100644
index 0000000..5c4dabf
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_flat_test.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_small.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_small.SC2Map
new file mode 100644
index 0000000..0d94368
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/32x32_small.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3m.SC2Map
new file mode 100644
index 0000000..b35ec10
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z.SC2Map
new file mode 100644
index 0000000..e5a4313
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map
new file mode 100644
index 0000000..3927ca4
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3s5z_vs_3s6z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_3z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_3z.SC2Map
new file mode 100644
index 0000000..4de7cf8
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_3z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_4z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_4z.SC2Map
new file mode 100644
index 0000000..8db2dfc
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_4z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_5z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_5z.SC2Map
new file mode 100644
index 0000000..70c99d2
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/3s_vs_5z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/5m_vs_6m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/5m_vs_6m.SC2Map
new file mode 100644
index 0000000..f2ae42c
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/5m_vs_6m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/6h_vs_8z.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/6h_vs_8z.SC2Map
new file mode 100644
index 0000000..df01eb6
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/6h_vs_8z.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/8m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/8m.SC2Map
new file mode 100644
index 0000000..6593c72
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/8m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/8m_vs_9m.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/8m_vs_9m.SC2Map
new file mode 100644
index 0000000..5b8815f
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/8m_vs_9m.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/MMM.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/MMM.SC2Map
new file mode 100644
index 0000000..ed26fe4
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/MMM.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/MMM2.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/MMM2.SC2Map
new file mode 100644
index 0000000..ab25a02
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/MMM2.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/bane_vs_bane.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/bane_vs_bane.SC2Map
new file mode 100644
index 0000000..bb81284
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/bane_vs_bane.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/corridor.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/corridor.SC2Map
new file mode 100644
index 0000000..90daed6
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/corridor.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/SMAC_Maps/so_many_baneling.SC2Map b/src/envs/smac_v2/official/maps/SMAC_Maps/so_many_baneling.SC2Map
new file mode 100644
index 0000000..6a184e3
Binary files /dev/null and b/src/envs/smac_v2/official/maps/SMAC_Maps/so_many_baneling.SC2Map differ
diff --git a/src/envs/smac_v2/official/maps/__init__.py b/src/envs/smac_v2/official/maps/__init__.py
new file mode 100644
index 0000000..4017bb3
--- /dev/null
+++ b/src/envs/smac_v2/official/maps/__init__.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from . import smac_maps
+
+
+def get_map_params(map_name):
+ map_param_registry = smac_maps.get_smac_map_registry()
+ return map_param_registry[map_name]
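
A quick usage sketch for this helper. The import path is illustrative (it depends on how `src/` ends up on the Python path); the returned values come from the registry in smac_maps.py below.

# Hypothetical import path; adjust to however this package is loaded.
from envs.smac_v2.official.maps import get_map_params

params = get_map_params("10gen_terran")
print(params["n_agents"], params["n_enemies"], params["map_name"])
# 10 10 32x32_flat  (per the 10gen_terran entry in smac_maps.py)
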
diff --git a/src/envs/smac_v2/official/maps/smac_maps.py b/src/envs/smac_v2/official/maps/smac_maps.py
new file mode 100644
index 0000000..aa3c106
--- /dev/null
+++ b/src/envs/smac_v2/official/maps/smac_maps.py
@@ -0,0 +1,58 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from pysc2.maps import lib
+
+
+
+class SMACMap(lib.Map):
+ directory = "SMAC_Maps"
+ download = "https://github.com/oxwhirl/smac#smac-maps"
+ players = 2
+ step_mul = 8
+ game_steps_per_episode = 0
+
+
+map_param_registry = {
+ "10gen_terran": {
+ "n_agents": 10,
+ "n_enemies": 10,
+ "limit": 400,
+ "a_race": "T",
+ "b_race": "T",
+ "unit_type_bits": 3,
+ "map_type": "terran_gen",
+ "map_name": "32x32_flat",
+ },
+ "10gen_zerg": {
+ "n_agents": 10,
+ "n_enemies": 10,
+ "limit": 400,
+ "a_race": "Z",
+ "b_race": "Z",
+ "unit_type_bits": 3,
+ "map_type": "zerg_gen",
+ "map_name": "32x32_flat",
+ },
+ "10gen_protoss": {
+ "n_agents": 10,
+ "n_enemies": 10,
+ "limit": 400,
+ "a_race": "P",
+ "b_race": "P",
+ "unit_type_bits": 3,
+ "map_type": "protoss_gen",
+ "map_name": "32x32_flat",
+ },
+}
+
+
+def get_smac_map_registry():
+ return map_param_registry
+
+
+for name, map_params in map_param_registry.items():
+ globals()[name] = type(
+ name, (SMACMap,), dict(filename=map_params["map_name"])
+ )
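
The loop above is plain dynamic class creation: each registry key becomes a `lib.Map` subclass whose `filename` points at the on-disk .SC2Map file, which is how `pysc2.maps.get("10gen_terran")` can later resolve the map by class name. Written out explicitly for one entry (a sketch, not additional repo code):

from pysc2.maps import lib

class SMACMapSketch(lib.Map):
    directory = "SMAC_Maps"
    players = 2
    step_mul = 8
    game_steps_per_episode = 0

# Equivalent of the type(...) call above for the "10gen_terran" entry.
Gen10Terran = type("10gen_terran", (SMACMapSketch,), dict(filename="32x32_flat"))
print(Gen10Terran.__name__, Gen10Terran.filename)  # 10gen_terran 32x32_flat
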
diff --git a/src/envs/smac_v2/official/render.py b/src/envs/smac_v2/official/render.py
new file mode 100644
index 0000000..8fb1216
--- /dev/null
+++ b/src/envs/smac_v2/official/render.py
@@ -0,0 +1,347 @@
+import numpy as np
+import re
+import subprocess
+import platform
+from absl import logging
+import math
+import time
+import collections
+import os
+import pygame
+import queue
+
+from pysc2.lib import colors
+from pysc2.lib import point
+from pysc2.lib.renderer_human import _Surface
+from pysc2.lib import transform
+from pysc2.lib import features
+
+
+def clamp(n, smallest, largest):
+ return max(smallest, min(n, largest))
+
+
+def _get_desktop_size():
+ """Get the desktop size."""
+ if platform.system() == "Linux":
+ try:
+ xrandr_query = subprocess.check_output(["xrandr", "--query"])
+ sizes = re.findall(
+ r"\bconnected primary (\d+)x(\d+)", str(xrandr_query)
+ )
+            if sizes:
+                return point.Point(int(sizes[0][0]), int(sizes[0][1]))
+        except (OSError, ValueError, subprocess.SubprocessError):
+ logging.error("Failed to get the resolution from xrandr.")
+
+ # Most general, but doesn't understand multiple monitors.
+ display_info = pygame.display.Info()
+ return point.Point(display_info.current_w, display_info.current_h)
+
+
+class StarCraft2Renderer:
+ def __init__(self, env, mode):
+ os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
+
+ self.env = env
+ self.mode = mode
+ self.obs = None
+ self._window_scale = 0.75
+ self.game_info = game_info = self.env._controller.game_info()
+ self.static_data = self.env._controller.data()
+
+ self._obs_queue = queue.Queue()
+ self._game_times = collections.deque(
+ maxlen=100
+ ) # Avg FPS over 100 frames. # pytype: disable=wrong-keyword-args
+ self._render_times = collections.deque(
+ maxlen=100
+ ) # pytype: disable=wrong-keyword-args
+ self._last_time = time.time()
+ self._last_game_loop = 0
+ self._name_lengths = {}
+
+ self._map_size = point.Point.build(game_info.start_raw.map_size)
+ self._playable = point.Rect(
+ point.Point.build(game_info.start_raw.playable_area.p0),
+ point.Point.build(game_info.start_raw.playable_area.p1),
+ )
+
+ window_size_px = point.Point(
+ self.env.window_size[0], self.env.window_size[1]
+ )
+ window_size_px = self._map_size.scale_max_size(
+ window_size_px * self._window_scale
+ ).ceil()
+ self._scale = window_size_px.y // 32
+
+ self.display = pygame.Surface(window_size_px)
+
+ if mode == "human":
+ self.display = pygame.display.set_mode(window_size_px, 0, 32)
+ pygame.display.init()
+
+ pygame.display.set_caption("Starcraft Viewer")
+ pygame.font.init()
+ self._world_to_world_tl = transform.Linear(
+ point.Point(1, -1), point.Point(0, self._map_size.y)
+ )
+ self._world_tl_to_screen = transform.Linear(scale=window_size_px / 32)
+ self.screen_transform = transform.Chain(
+ self._world_to_world_tl, self._world_tl_to_screen
+ )
+
+ surf_loc = point.Rect(point.origin, window_size_px)
+ sub_surf = self.display.subsurface(
+ pygame.Rect(surf_loc.tl, surf_loc.size)
+ )
+ self._surf = _Surface(
+ sub_surf,
+ None,
+ surf_loc,
+ self.screen_transform,
+ None,
+ self.draw_screen,
+ )
+
+ self._font_small = pygame.font.Font(None, int(self._scale * 0.5))
+ self._font_large = pygame.font.Font(None, self._scale)
+
+ def close(self):
+ pygame.display.quit()
+ pygame.quit()
+
+ def _get_units(self):
+ for u in sorted(
+ self.obs.observation.raw_data.units,
+ key=lambda u: (u.pos.z, u.owner != 16, -u.radius, u.tag),
+ ):
+ yield u, point.Point.build(u.pos)
+
+ def get_unit_name(self, surf, name, radius):
+ """Get a length limited unit name for drawing units."""
+ key = (name, radius)
+ if key not in self._name_lengths:
+ max_len = surf.world_to_surf.fwd_dist(radius * 1.6)
+ for i in range(len(name)):
+ if self._font_small.size(name[: i + 1])[0] > max_len:
+ self._name_lengths[key] = name[:i]
+ break
+ else:
+ self._name_lengths[key] = name
+ return self._name_lengths[key]
+
+ def render(self, mode):
+ self.obs = self.env._obs
+ self.score = self.env.reward
+ self.step = self.env._episode_steps
+
+ now = time.time()
+ self._game_times.append(
+ (
+ now - self._last_time,
+ max(
+ 1,
+                    self.obs.observation.game_loop
+                    - self._last_game_loop,
+ ),
+ )
+ )
+
+ if mode == "human":
+ pygame.event.pump()
+
+ self._surf.draw(self._surf)
+
+ observation = np.array(pygame.surfarray.pixels3d(self.display))
+
+ if mode == "human":
+ pygame.display.flip()
+
+ self._last_time = now
+ self._last_game_loop = self.obs.observation.game_loop
+ # self._obs_queue.put(self.obs)
+ return (
+ np.transpose(observation, axes=(1, 0, 2))
+ if mode == "rgb_array"
+ else None
+ )
+
+ def draw_base_map(self, surf):
+ """Draw the base map."""
+ hmap_feature = features.SCREEN_FEATURES.height_map
+ hmap = self.env.terrain_height * 255
+ hmap = hmap.astype(np.uint8)
+ if (
+ self.env.map_name == "corridor"
+ or self.env.map_name == "so_many_baneling"
+ or self.env.map_name == "2s_vs_1sc"
+ ):
+ hmap = np.flip(hmap)
+ else:
+ hmap = np.rot90(hmap, axes=(1, 0))
+ if not hmap.any():
+ hmap = hmap + 100 # pylint: disable=g-no-augmented-assignment
+ hmap_color = hmap_feature.color(hmap)
+ out = hmap_color * 0.6
+
+ surf.blit_np_array(out)
+
+ def draw_units(self, surf):
+ """Draw the units."""
+ unit_dict = None # Cache the units {tag: unit_proto} for orders.
+ tau = 2 * math.pi
+ for u, p in self._get_units():
+ fraction_damage = clamp(
+ (u.health_max - u.health) / (u.health_max or 1), 0, 1
+ )
+ surf.draw_circle(
+ colors.PLAYER_ABSOLUTE_PALETTE[u.owner], p, u.radius
+ )
+
+ if fraction_damage > 0:
+ surf.draw_circle(
+ colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 2,
+ p,
+ u.radius * fraction_damage,
+ )
+ surf.draw_circle(colors.black, p, u.radius, thickness=1)
+
+ if self.static_data.unit_stats[u.unit_type].movement_speed > 0:
+ surf.draw_arc(
+ colors.white,
+ p,
+ u.radius,
+ u.facing - 0.1,
+ u.facing + 0.1,
+ thickness=1,
+ )
+
+ def draw_arc_ratio(
+ color, world_loc, radius, start, end, thickness=1
+ ):
+ surf.draw_arc(
+ color, world_loc, radius, start * tau, end * tau, thickness
+ )
+
+ if u.shield and u.shield_max:
+ draw_arc_ratio(
+ colors.blue, p, u.radius - 0.05, 0, u.shield / u.shield_max
+ )
+
+ if u.energy and u.energy_max:
+ draw_arc_ratio(
+ colors.purple * 0.9,
+ p,
+ u.radius - 0.1,
+ 0,
+ u.energy / u.energy_max,
+ )
+ elif u.orders and 0 < u.orders[0].progress < 1:
+ draw_arc_ratio(
+ colors.cyan, p, u.radius - 0.15, 0, u.orders[0].progress
+ )
+ if u.buff_duration_remain and u.buff_duration_max:
+ draw_arc_ratio(
+ colors.white,
+ p,
+ u.radius - 0.2,
+ 0,
+ u.buff_duration_remain / u.buff_duration_max,
+ )
+ if u.attack_upgrade_level:
+ draw_arc_ratio(
+ self.upgrade_colors[u.attack_upgrade_level],
+ p,
+ u.radius - 0.25,
+ 0.18,
+ 0.22,
+ thickness=3,
+ )
+ if u.armor_upgrade_level:
+ draw_arc_ratio(
+ self.upgrade_colors[u.armor_upgrade_level],
+ p,
+ u.radius - 0.25,
+ 0.23,
+ 0.27,
+ thickness=3,
+ )
+ if u.shield_upgrade_level:
+ draw_arc_ratio(
+ self.upgrade_colors[u.shield_upgrade_level],
+ p,
+ u.radius - 0.25,
+ 0.28,
+ 0.32,
+ thickness=3,
+ )
+
+ def write_small(loc, s):
+ surf.write_world(self._font_small, colors.white, loc, str(s))
+
+ name = self.get_unit_name(
+ surf,
+ self.static_data.units.get(u.unit_type, ""),
+ u.radius,
+ )
+
+ if name:
+ write_small(p, name)
+
+ start_point = p
+ for o in u.orders:
+ target_point = None
+ if o.HasField("target_unit_tag"):
+ if unit_dict is None:
+ unit_dict = {
+ t.tag: t
+ for t in self.obs.observation.raw_data.units
+ }
+ target_unit = unit_dict.get(o.target_unit_tag)
+ if target_unit:
+ target_point = point.Point.build(target_unit.pos)
+ if target_point:
+ surf.draw_line(colors.cyan, start_point, target_point)
+ start_point = target_point
+ else:
+ break
+
+ def draw_overlay(self, surf):
+ """Draw the overlay describing resources."""
+ obs = self.obs.observation
+ times, steps = zip(*self._game_times)
+ sec = obs.game_loop // 22.4
+ surf.write_screen(
+ self._font_large,
+ colors.green,
+ (-0.2, 0.2),
+ "Score: %s, Step: %s, %.1f/s, Time: %d:%02d"
+ % (
+ self.score,
+ self.step,
+ sum(steps) / (sum(times) or 1),
+ sec // 60,
+ sec % 60,
+ ),
+ align="right",
+ )
+ surf.write_screen(
+ self._font_large,
+ colors.green * 0.8,
+ (-0.2, 1.2),
+ "APM: %d, EPM: %d, FPS: O:%.1f, R:%.1f"
+ % (
+ obs.score.score_details.current_apm,
+ obs.score.score_details.current_effective_apm,
+ len(times) / (sum(times) or 1),
+ len(self._render_times) / (sum(self._render_times) or 1),
+ ),
+ align="right",
+ )
+
+ def draw_screen(self, surf):
+ """Draw the screen area."""
+ self.draw_base_map(surf)
+ self.draw_units(surf)
+ self.draw_overlay(surf)
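
The renderer's rate reporting in `draw_overlay` boils down to a bounded deque of `(wall_time_delta, game_loop_delta)` pairs and a ratio of sums. The same bookkeeping in isolation (names here are illustrative):

import collections
import time

game_times = collections.deque(maxlen=100)   # rolling window, as in __init__ above
last_time = time.time()
last_game_loop = 0

def record(game_loop):
    global last_time, last_game_loop
    now = time.time()
    game_times.append((now - last_time, max(1, game_loop - last_game_loop)))
    last_time, last_game_loop = now, game_loop

def game_loops_per_second():
    times, steps = zip(*game_times)
    return sum(steps) / (sum(times) or 1)

for loop in range(8, 88, 8):   # pretend the game advances 8 loops per render
    record(loop)
print(round(game_loops_per_second(), 1))
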
diff --git a/src/envs/smac_v2/official/sc2_official.py b/src/envs/smac_v2/official/sc2_official.py
new file mode 100644
index 0000000..fdeae4f
--- /dev/null
+++ b/src/envs/smac_v2/official/sc2_official.py
@@ -0,0 +1,2485 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from smac.env.multiagentenv import MultiAgentEnv
+
+from smac.env.starcraft2.maps import get_map_params
+
+
+import atexit
+from warnings import warn
+from operator import attrgetter
+from copy import deepcopy
+import numpy as np
+import enum
+import math
+from absl import logging
+from pysc2.lib.units import Neutral, Protoss, Terran, Zerg
+
+from pysc2 import maps
+from pysc2 import run_configs
+from pysc2.lib import protocol
+
+from s2clientprotocol import common_pb2 as sc_common
+from s2clientprotocol import sc2api_pb2 as sc_pb
+from s2clientprotocol import raw_pb2 as r_pb
+from s2clientprotocol import debug_pb2 as d_pb
+
+races = {
+ "R": sc_common.Random,
+ "P": sc_common.Protoss,
+ "T": sc_common.Terran,
+ "Z": sc_common.Zerg,
+}
+
+difficulties = {
+ "1": sc_pb.VeryEasy,
+ "2": sc_pb.Easy,
+ "3": sc_pb.Medium,
+ "4": sc_pb.MediumHard,
+ "5": sc_pb.Hard,
+ "6": sc_pb.Harder,
+ "7": sc_pb.VeryHard,
+ "8": sc_pb.CheatVision,
+ "9": sc_pb.CheatMoney,
+ "A": sc_pb.CheatInsane,
+}
+
+actions = {
+ "move": 16, # target: PointOrUnit
+ "attack": 23, # target: PointOrUnit
+ "stop": 4, # target: None
+ "heal": 386, # Unit
+}
+
+
+class Direction(enum.IntEnum):
+ NORTH = 0
+ SOUTH = 1
+ EAST = 2
+ WEST = 3
+
+
+EPS = 1e-7
+
+
+class StarCraft2Env(MultiAgentEnv):
+ """The StarCraft II environment for decentralised multi-agent
+ micromanagement scenarios.
+ """
+
+ def __init__(
+ self,
+ map_name="8m",
+ step_mul=8,
+ move_amount=2,
+ difficulty="7",
+ game_version=None,
+ seed=None,
+ continuing_episode=False,
+ obs_all_health=True,
+ obs_own_health=True,
+ obs_last_action=False,
+ obs_pathing_grid=False,
+ obs_terrain_height=False,
+ obs_instead_of_state=False,
+ obs_timestep_number=False,
+ obs_own_pos=False,
+ obs_starcraft=True,
+ conic_fov=False,
+ num_fov_actions=12,
+ state_last_action=True,
+ state_timestep_number=False,
+ reward_sparse=False,
+ reward_only_positive=True,
+ reward_death_value=10,
+ reward_win=200,
+ reward_defeat=0,
+ reward_negative_scale=0.5,
+ reward_scale=True,
+ reward_scale_rate=20,
+ kill_unit_step_mul=2,
+ fully_observable=False,
+ capability_config={},
+ replay_dir="",
+ replay_prefix="",
+ window_size_x=1920,
+ window_size_y=1200,
+ heuristic_ai=False,
+ heuristic_rest=False,
+ debug=False,
+ ):
+ """
+        Create a StarCraft2Env environment.
+
+ Parameters
+ ----------
+ map_name : str, optional
+ The name of the SC2 map to play (default is "8m"). The full list
+ can be found by running bin/map_list.
+ step_mul : int, optional
+ How many game steps per agent step (default is 8). None
+ indicates to use the default map step_mul.
+ move_amount : float, optional
+ How far away units are ordered to move per step (default is 2).
+ difficulty : str, optional
+ The difficulty of built-in computer AI bot (default is "7").
+ game_version : str, optional
+ StarCraft II game version (default is None). None indicates the
+ latest version.
+ seed : int, optional
+            Random seed used during game initialisation (default is None).
+ continuing_episode : bool, optional
+ Whether to consider episodes continuing or finished after time
+ limit is reached (default is False).
+ obs_all_health : bool, optional
+ Agents receive the health of all units (in the sight range) as part
+ of observations (default is True).
+ obs_own_health : bool, optional
+ Agents receive their own health as a part of observations (default
+            is True). This flag is ignored when obs_all_health == True.
+ obs_last_action : bool, optional
+ Agents receive the last actions of all units (in the sight range)
+ as part of observations (default is False).
+ obs_pathing_grid : bool, optional
+ Whether observations include pathing values surrounding the agent
+ (default is False).
+ obs_terrain_height : bool, optional
+ Whether observations include terrain height values surrounding the
+ agent (default is False).
+ obs_instead_of_state : bool, optional
+ Use combination of all agents' observations as the global state
+ (default is False).
+ obs_timestep_number : bool, optional
+ Whether observations include the current timestep of the episode
+ (default is False).
+ state_last_action : bool, optional
+ Include the last actions of all agents as part of the global state
+ (default is True).
+ state_timestep_number : bool, optional
+            Whether the state includes the current timestep of the episode
+ (default is False).
+ reward_sparse : bool, optional
+            Receive 1/-1 reward for winning/losing an episode (default is
+            False). The rest of the reward parameters are ignored if True.
+ reward_only_positive : bool, optional
+ Reward is always positive (default is True).
+ reward_death_value : float, optional
+ The amount of reward received for killing an enemy unit (default
+ is 10). This is also the negative penalty for having an allied unit
+ killed if reward_only_positive == False.
+ reward_win : float, optional
+ The reward for winning in an episode (default is 200).
+ reward_defeat : float, optional
+            The reward for losing in an episode (default is 0). This value
+ should be nonpositive.
+ reward_negative_scale : float, optional
+ Scaling factor for negative rewards (default is 0.5). This
+ parameter is ignored when reward_only_positive == True.
+ reward_scale : bool, optional
+ Whether or not to scale the reward (default is True).
+ reward_scale_rate : float, optional
+ Reward scale rate (default is 20). When reward_scale == True, the
+ reward received by the agents is divided by (max_reward /
+ reward_scale_rate), where max_reward is the maximum possible
+ reward per episode without considering the shield regeneration
+ of Protoss units.
+ replay_dir : str, optional
+            The directory to save replays (default is ""). If empty, the
+            replay will be saved in the Replays directory where StarCraft II
+            is installed.
+        replay_prefix : str, optional
+            The prefix of the replay to be saved (default is ""). If empty,
+            the name of the map will be used.
+ window_size_x : int, optional
+            The width of the StarCraft II window in pixels (default is 1920).
+        window_size_y : int, optional
+            The height of the StarCraft II window in pixels (default is 1200).
+ heuristic_ai: bool, optional
+ Whether or not to use a non-learning heuristic AI (default False).
+ heuristic_rest: bool, optional
+ At any moment, restrict the actions of the heuristic AI to be
+ chosen from actions available to RL agents (default is False).
+ Ignored if heuristic_ai == False.
+ debug: bool, optional
+ Log messages about observations, state, actions and rewards for
+ debugging purposes (default is False).
+ """
+ # Map arguments
+ self.map_name = map_name
+ map_params = get_map_params(self.map_name)
+ self.map_params = map_params
+ self.episode_limit = map_params["limit"]
+ self._move_amount = move_amount
+ self._step_mul = step_mul
+ self._kill_unit_step_mul = kill_unit_step_mul
+ self.difficulty = difficulty
+
+ # Observations and state
+ self.obs_own_health = obs_own_health
+ self.obs_all_health = obs_all_health
+ self.obs_instead_of_state = obs_instead_of_state
+ self.obs_last_action = obs_last_action
+ self.obs_pathing_grid = obs_pathing_grid
+ self.obs_terrain_height = obs_terrain_height
+ self.obs_timestep_number = obs_timestep_number
+ self.obs_starcraft = obs_starcraft
+ self.state_last_action = state_last_action
+ self.state_timestep_number = state_timestep_number
+ if self.obs_all_health:
+ self.obs_own_health = True
+ self.n_obs_pathing = 8
+ self.n_obs_height = 9
+
+ # Rewards args
+ self.reward_sparse = reward_sparse
+ self.reward_only_positive = reward_only_positive
+ self.reward_negative_scale = reward_negative_scale
+ self.reward_death_value = reward_death_value
+ self.reward_win = reward_win
+ self.reward_defeat = reward_defeat
+ self.reward_scale = reward_scale
+ self.reward_scale_rate = reward_scale_rate
+
+ # Meta MARL
+ self.capability_config = capability_config
+ self.fully_observable = fully_observable
+ self.stochastic_attack = "attack" in self.capability_config
+ self.stochastic_health = "health" in self.capability_config
+ self.replace_teammates = "team_gen" in self.capability_config
+ self.obs_own_pos = obs_own_pos
+ self.mask_enemies = "enemy_mask" in self.capability_config
+ if self.stochastic_attack:
+ self.zero_pad_stochastic_attack = not self.capability_config[
+ "attack"
+ ]["observe"]
+ self.observe_attack_probs = self.capability_config["attack"][
+ "observe"
+ ]
+ if self.stochastic_health:
+ self.zero_pad_health = not self.capability_config["health"][
+ "observe"
+ ]
+ self.observe_teammate_health = self.capability_config["health"][
+ "observe"
+ ]
+ if self.replace_teammates:
+ self.zero_pad_unit_types = not self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.observe_teammate_types = self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.n_agents = (
+ map_params["n_agents"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.n_enemies = (
+ map_params["n_enemies"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.random_start = "start_positions" in self.capability_config
+ self.conic_fov = conic_fov
+ self.n_fov_actions = num_fov_actions if self.conic_fov else 0
+ self.conic_fov_angle = (
+ (2 * np.pi) / self.n_fov_actions if self.conic_fov else 0
+ )
+ # Other
+ self.game_version = game_version
+ self.continuing_episode = continuing_episode
+ self._seed = seed
+ self.heuristic_ai = heuristic_ai
+ self.heuristic_rest = heuristic_rest
+ self.debug = debug
+ self.window_size = (window_size_x, window_size_y)
+ self.replay_dir = replay_dir
+ self.replay_prefix = replay_prefix
+
+ # Actions
+ self.n_actions_move = 4
+
+ self.n_actions_no_attack = self.n_actions_move + self.n_fov_actions + 2
+ self.n_actions = self.n_actions_no_attack + self.n_enemies
+
+ # Map info
+ self._agent_race = map_params["a_race"]
+ self._bot_race = map_params["b_race"]
+ self.shield_bits_ally = 1 if self._agent_race == "P" else 0
+ self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
+ # NOTE: The map_type, which is used to initialise the unit
+ # type ids, the unit_type_bits and the races, are still properties of the
+ # map. This means even the 10gen_{race} maps are limited to the
+ # unit types statically defined in the unit type id assignment.
+ # Lifting this restriction shouldn't be too much work, I've just
+ # not done it.
+ self.unit_type_bits = map_params["unit_type_bits"]
+ self.map_type = map_params["map_type"]
+ self._unit_types = None
+
+ self.max_reward = (
+ self.n_enemies * self.reward_death_value + self.reward_win
+ )
+
+ # create lists containing the names of attributes returned in states
+ self.ally_state_attr_names = [
+ "health",
+ "energy/cooldown",
+ "rel_x",
+ "rel_y",
+ ]
+ self.enemy_state_attr_names = ["health", "rel_x", "rel_y"]
+
+ if self.shield_bits_ally > 0:
+ self.ally_state_attr_names += ["shield"]
+ if self.shield_bits_enemy > 0:
+ self.enemy_state_attr_names += ["shield"]
+ if self.conic_fov:
+ self.ally_state_attr_names += ["fov_x", "fov_y"]
+
+ self.capability_attr_names = []
+ if "attack" in self.capability_config:
+ self.capability_attr_names += ["attack_probability"]
+ if "health" in self.capability_config:
+ self.capability_attr_names += ["total_health"]
+ if self.unit_type_bits > 0:
+ bit_attr_names = [
+ "type_{}".format(bit) for bit in range(self.unit_type_bits)
+ ]
+ self.capability_attr_names += bit_attr_names
+ self.enemy_state_attr_names += bit_attr_names
+
+ self.agents = {}
+ self.enemies = {}
+ self.unit_name_to_id_map = {}
+ self.id_to_unit_name_map = {}
+ self._episode_count = 0
+ self._episode_steps = 0
+ self._total_steps = 0
+ self._obs = None
+ self.battles_won = 0
+ self.battles_game = 0
+ self.timeouts = 0
+ self.force_restarts = 0
+ self.last_stats = None
+ self.agent_attack_probabilities = np.zeros(self.n_agents)
+ self.agent_health_levels = np.zeros(self.n_agents)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
+ self.canonical_fov_directions = np.array(
+ [
+ (
+ np.cos(2 * np.pi * (i / self.n_fov_actions)),
+ np.sin(2 * np.pi * (i / self.n_fov_actions)),
+ )
+ for i in range(self.n_fov_actions)
+ ]
+ )
+ self.new_unit_positions = np.zeros((self.n_agents, 2))
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+ self.init_positions = np.zeros((self.n_agents, 2))
+ self._min_unit_type = 0
+ self.marine_id = self.marauder_id = self.medivac_id = 0
+ self.hydralisk_id = self.zergling_id = self.baneling_id = 0
+ self.stalker_id = self.colossus_id = self.zealot_id = 0
+ self.max_distance_x = 0
+ self.max_distance_y = 0
+ self.map_x = 0
+ self.map_y = 0
+ self.reward = 0
+ self.renderer = None
+ self.terrain_height = None
+ self.pathing_grid = None
+ self.state_feature_names = self.build_state_feature_names()
+ self.obs_feature_names = self.build_obs_feature_names()
+ self._run_config = None
+ self._sc2_proc = None
+ self._controller = None
+ # Try to avoid leaking SC2 processes on shutdown
+ atexit.register(lambda: self.close())
+
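
With the defaults above, the reward scaling applied later in `step()` works out as follows (a worked example of the `max_reward / reward_scale_rate` divisor, not new API):

n_enemies = 10              # e.g. the 10gen_* maps
reward_death_value = 10
reward_win = 200
reward_scale_rate = 20

max_reward = n_enemies * reward_death_value + reward_win   # 300
print(max_reward / reward_scale_rate)                      # rewards are divided by 15.0
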
+ def _only_one_meta_marl_flag_on(self):
+ """Function that checks that either all the meta marl flags are off,
+ or at most one has been enabled."""
+ if self.stochastic_attack:
+ return not self.stochastic_health and not self.replace_teammates
+ else:
+ return not self.replace_teammates or not self.stochastic_health
+
+ def _launch(self):
+ """Launch the StarCraft II game."""
+ self._run_config = run_configs.get(version=self.game_version)
+ self.version = self._run_config.version
+ _map = maps.get(self.map_name)
+
+ # Setting up the interface
+ interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
+ self._sc2_proc = self._run_config.start(
+ window_size=self.window_size, want_rgb=False
+ )
+ self._controller = self._sc2_proc.controller
+
+ # Request to create the game
+ create = sc_pb.RequestCreateGame(
+ local_map=sc_pb.LocalMap(
+ map_path=_map.path,
+ map_data=self._run_config.map_data(_map.path),
+ ),
+ realtime=False,
+ random_seed=self._seed,
+ )
+ create.player_setup.add(type=sc_pb.Participant)
+ create.player_setup.add(
+ type=sc_pb.Computer,
+ race=races[self._bot_race],
+ difficulty=difficulties[self.difficulty],
+ )
+ self._controller.create_game(create)
+
+ join = sc_pb.RequestJoinGame(
+ race=races[self._agent_race], options=interface_options
+ )
+ self._controller.join_game(join)
+
+ game_info = self._controller.game_info()
+ map_info = game_info.start_raw
+ self.map_play_area_min = map_info.playable_area.p0
+ self.map_play_area_max = map_info.playable_area.p1
+ self.max_distance_x = (
+ self.map_play_area_max.x - self.map_play_area_min.x
+ )
+ self.max_distance_y = (
+ self.map_play_area_max.y - self.map_play_area_min.y
+ )
+ self.map_x = map_info.map_size.x
+ self.map_y = map_info.map_size.y
+
+ if map_info.pathing_grid.bits_per_pixel == 1:
+ vals = np.array(list(map_info.pathing_grid.data)).reshape(
+ self.map_x, int(self.map_y / 8)
+ )
+ self.pathing_grid = np.transpose(
+ np.array(
+ [
+ [(b >> i) & 1 for b in row for i in range(7, -1, -1)]
+ for row in vals
+ ],
+                    dtype=bool,  # np.bool alias was removed in NumPy 1.24
+ )
+ )
+ else:
+ self.pathing_grid = np.invert(
+ np.flip(
+ np.transpose(
+ np.array(
+                            list(map_info.pathing_grid.data), dtype=bool
+ ).reshape(self.map_x, self.map_y)
+ ),
+ axis=1,
+ )
+ )
+
+ self.terrain_height = (
+ np.flip(
+ np.transpose(
+ np.array(list(map_info.terrain_height.data)).reshape(
+ self.map_x, self.map_y
+ )
+ ),
+ 1,
+ )
+ / 255
+ )
+
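
The 1-bit-per-pixel branch above unpacks each byte MSB-first into eight pathing flags. A compact equivalent using `np.unpackbits` (a sketch; `raw` stands in for `map_info.pathing_grid.data`):

import numpy as np

map_x, map_y = 32, 32
raw = bytes(map_x * map_y // 8)                            # placeholder payload, all zeros
bits = np.unpackbits(np.frombuffer(raw, dtype=np.uint8))   # MSB-first by default
pathing_grid = np.transpose(bits.reshape(map_x, map_y).astype(bool))
print(pathing_grid.shape)                                  # (32, 32)
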
+ def reset(self, episode_config={}):
+ """Reset the environment. Required after each full episode.
+ Returns initial observations and states.
+ """
+ self._episode_steps = 0
+ self.episode_config = episode_config
+ if self._episode_count == 0:
+ # Launch StarCraft II
+ self._launch()
+ else:
+ self._restart()
+
+ # Information kept for counting the reward
+ self.agent_attack_probabilities = episode_config.get("attack", {}).get(
+ "item", None
+ )
+ self.agent_health_levels = episode_config.get("health", {}).get(
+ "item", None
+ )
+ self.enemy_mask = episode_config.get("enemy_mask", {}).get(
+ "item", None
+ )
+ self.ally_start_positions = episode_config.get(
+ "ally_start_positions", {}
+ ).get("item", None)
+ self.enemy_start_positions = episode_config.get(
+ "enemy_start_positions", {}
+ ).get("item", None)
+ self.mask_enemies = self.enemy_mask is not None
+ team = episode_config.get("team_gen", {}).get("item", None)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.win_counted = False
+ self.defeat_counted = False
+ if self.debug:
+ logging.debug(
+ f"Attack Probabilities: {self.agent_attack_probabilities}"
+ )
+ logging.debug(f"Health Levels: {self.agent_health_levels}")
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+
+ if self.heuristic_ai:
+ self.heuristic_targets = [None] * self.n_agents
+
+ try:
+ self._obs = self._controller.observe()
+ self.init_units(team, episode_config=episode_config)
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ if self.debug:
+ logging.debug(
+ "Started Episode {}".format(self._episode_count).center(
+ 60, "*"
+ )
+ )
+ return self.get_obs(), self.get_state()
+
+ def _restart(self):
+ """Restart the environment by killing all units on the map.
+ There is a trigger in the SC2Map file, which restarts the
+ episode when there are no units left.
+ """
+ try:
+ self._kill_all_units()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ def full_restart(self):
+ """Full restart. Closes the SC2 process and launches a new one."""
+ self._sc2_proc.close()
+ self._launch()
+ self.force_restarts += 1
+
+ def _kill_units_below_health_level(self):
+ units_to_kill = []
+ for al_id, al_unit in self.agents.items():
+ if (
+ al_unit.health / al_unit.health_max
+ < self.agent_health_levels[al_id]
+ ) and not self.death_tracker_ally[al_id]:
+ units_to_kill.append(al_unit.tag)
+ self._kill_units(units_to_kill)
+
+ def step(self, actions):
+ """A single environment step. Returns reward, terminated, info."""
+ actions_int = [int(a) for a in actions]
+
+ self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
+
+ # Collect individual actions
+ sc_actions = []
+ if self.debug:
+ logging.debug("Actions".center(60, "-"))
+
+ for a_id, action in enumerate(actions_int):
+ if not self.heuristic_ai:
+ sc_action = self.get_agent_action(a_id, action)
+ else:
+ sc_action, action_num = self.get_agent_action_heuristic(
+ a_id, action
+ )
+ actions[a_id] = action_num
+ if sc_action:
+ sc_actions.append(sc_action)
+ # Send action request
+ req_actions = sc_pb.RequestAction(actions=sc_actions)
+
+ try:
+
+ if self.conic_fov:
+ self.render_fovs()
+ self._controller.actions(req_actions)
+ # Make step in SC2, i.e. apply actions
+ if not self.stochastic_health:
+ self._controller.step(self._step_mul)
+ else:
+ self._controller.step(
+ self._step_mul - self._kill_unit_step_mul
+ )
+ self._kill_units_below_health_level()
+ self._controller.step(self._kill_unit_step_mul)
+ # Observe here so that we know if the episode is over.
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ return 0, True, {}
+
+ self._total_steps += 1
+ self._episode_steps += 1
+
+ # Update units
+ game_end_code = self.update_units()
+
+ terminated = False
+ reward = self.reward_battle()
+ info = {"battle_won": False}
+
+ # count units that are still alive
+ dead_allies, dead_enemies = 0, 0
+ for _al_id, al_unit in self.agents.items():
+ if al_unit.health == 0:
+ dead_allies += 1
+ for _e_id, e_unit in self.enemies.items():
+ if e_unit.health == 0:
+ dead_enemies += 1
+
+ info["dead_allies"] = dead_allies
+ info["dead_enemies"] = dead_enemies
+
+ if game_end_code is not None:
+ # Battle is over
+ terminated = True
+ self.battles_game += 1
+ if game_end_code == 1 and not self.win_counted:
+ self.battles_won += 1
+ self.win_counted = True
+ info["battle_won"] = True
+ if not self.reward_sparse:
+ reward += self.reward_win
+ else:
+ reward = 1
+ elif game_end_code == -1 and not self.defeat_counted:
+ self.defeat_counted = True
+ if not self.reward_sparse:
+ reward += self.reward_defeat
+ else:
+ reward = -1
+
+ elif self._episode_steps >= self.episode_limit:
+ # Episode limit reached
+ terminated = True
+ if self.continuing_episode:
+ info["episode_limit"] = True
+ self.battles_game += 1
+ self.timeouts += 1
+
+ if self.debug:
+ logging.debug("Reward = {}".format(reward).center(60, "-"))
+
+ if terminated:
+ self._episode_count += 1
+
+ if self.reward_scale:
+ reward /= self.max_reward / self.reward_scale_rate
+
+ self.reward = reward
+
+ return reward, terminated, info
+
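
For orientation, a typical interaction loop with this environment looks like the sketch below. It assumes a working local StarCraft II installation plus the SMAC_Maps files, uses `close()` (registered with atexit above), and simply picks the first available action for every agent.

env = StarCraft2Env(map_name="8m", debug=False)
obs, state = env.reset()
terminated = False
while not terminated:
    actions = []
    for agent_id in range(env.n_agents):
        avail = env.get_avail_agent_actions(agent_id)
        # take the first currently available action (no-op/stop/move/attack)
        actions.append(next(i for i, a in enumerate(avail) if a))
    reward, terminated, info = env.step(actions)
env.close()
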
+ def get_agent_action(self, a_id, action):
+ """Construct the action for agent a_id."""
+ avail_actions = self.get_avail_agent_actions(a_id)
+ assert (
+ avail_actions[action] == 1
+ ), "Agent {} cannot perform action {}".format(a_id, action)
+
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+ x = unit.pos.x
+ y = unit.pos.y
+
+ if action == 0:
+ # no-op (valid only when dead)
+ assert unit.health == 0, "No-op only available for dead agents."
+ if self.debug:
+ logging.debug("Agent {}: Dead".format(a_id))
+ return None
+ elif action == 1:
+ # stop
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["stop"],
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Stop".format(a_id))
+
+ elif action == 2:
+ # move north
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y + self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y + self._move_amount]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move North".format(a_id))
+
+ elif action == 3:
+ # move south
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y - self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y - self._move_amount]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move South".format(a_id))
+
+ elif action == 4:
+ # move east
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x + self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x + self._move_amount, y]
+ )
+
+ if self.debug:
+ logging.debug("Agent {}: Move East".format(a_id))
+
+ elif action == 5:
+ # move west
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x - self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x - self._move_amount, y]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move West".format(a_id))
+ elif self.conic_fov and action in range(6, 6 + self.n_fov_actions):
+ self.fov_directions[a_id] = self.canonical_fov_directions[
+ action - 6
+ ]
+ cmd = None
+ else:
+ # attack/heal units that are in range
+ target_id = action - self.n_actions_no_attack
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and unit.unit_type == self.medivac_id
+ ):
+ target_unit = self.agents[target_id]
+ action_name = "heal"
+ else:
+ target_unit = self.enemies[target_id]
+ action_name = "attack"
+
+ if self.stochastic_attack:
+ p = np.random.default_rng().uniform()
+ if p > self.agent_attack_probabilities[a_id]:
+ if self.debug:
+ logging.debug(
+ f"Agent {a_id} {action_name}s {target_id}, but fails"
+ )
+ return None
+ action_id = actions[action_name]
+ target_tag = target_unit.tag
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s unit # {}".format(
+ a_id, action_name, target_id
+ )
+ )
+ if cmd:
+ sc_action = sc_pb.Action(
+ action_raw=r_pb.ActionRaw(unit_command=cmd)
+ )
+ return sc_action
+ return None
+
+ def get_agent_action_heuristic(self, a_id, action):
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+
+ target = self.heuristic_targets[a_id]
+ if unit.unit_type == self.medivac_id:
+ if (
+ target is None
+ or self.agents[target].health == 0
+ or self.agents[target].health == self.agents[target].health_max
+ ):
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for al_id, al_unit in self.agents.items():
+ if al_unit.unit_type == self.medivac_id:
+ continue
+ if (
+ al_unit.health != 0
+ and al_unit.health != al_unit.health_max
+ ):
+ dist = self.distance(
+ unit.pos.x,
+ unit.pos.y,
+ al_unit.pos.x,
+ al_unit.pos.y,
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = al_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["heal"]
+ target_tag = self.agents[self.heuristic_targets[a_id]].tag
+ else:
+ if target is None or self.enemies[target].health == 0:
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for e_id, e_unit in self.enemies.items():
+ if (
+ unit.unit_type == self.marauder_id
+ and e_unit.unit_type == self.medivac_id
+ ):
+ continue
+ if e_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, e_unit.pos.x, e_unit.pos.y
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = e_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["attack"]
+ target_tag = self.enemies[self.heuristic_targets[a_id]].tag
+
+ action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack
+
+ # Check if the action is available
+ if (
+ self.heuristic_rest
+ and self.get_avail_agent_actions(a_id)[action_num] == 0
+ ):
+
+ # Move towards the target rather than attacking/healing
+ if unit.unit_type == self.medivac_id:
+ target_unit = self.agents[self.heuristic_targets[a_id]]
+ else:
+ target_unit = self.enemies[self.heuristic_targets[a_id]]
+
+ delta_x = target_unit.pos.x - unit.pos.x
+ delta_y = target_unit.pos.y - unit.pos.y
+
+ if abs(delta_x) > abs(delta_y): # east or west
+ if delta_x > 0: # east
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x + self._move_amount, y=unit.pos.y
+ )
+ action_num = 4
+ else: # west
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x - self._move_amount, y=unit.pos.y
+ )
+ action_num = 5
+ else: # north or south
+ if delta_y > 0: # north
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y + self._move_amount
+ )
+ action_num = 2
+ else: # south
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y - self._move_amount
+ )
+ action_num = 3
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=target_pos,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ else:
+ # Attack/heal the target
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
+ return sc_action, action_num
+
+ def reward_battle(self):
+ """Reward function when self.reward_spare==False.
+ Returns accumulative hit/shield point damage dealt to the enemy
+ + reward_death_value per enemy unit killed, and, in case
+ self.reward_only_positive == False, - (damage dealt to ally units
+ + reward_death_value per ally unit killed) * self.reward_negative_scale
+ """
+ assert (
+ not self.stochastic_health or self.reward_only_positive
+ ), "Different Health Levels are currently only compatible with positive rewards"
+ if self.reward_sparse:
+ return 0
+
+ reward = 0
+ delta_deaths = 0
+ delta_ally = 0
+ delta_enemy = 0
+
+ neg_scale = self.reward_negative_scale
+
+ # update deaths
+ for al_id, al_unit in self.agents.items():
+ if not self.death_tracker_ally[al_id]:
+ # did not die so far
+ prev_health = (
+ self.previous_ally_units[al_id].health
+ + self.previous_ally_units[al_id].shield
+ )
+ if al_unit.health == 0:
+ # just died
+ self.death_tracker_ally[al_id] = 1
+ if not self.reward_only_positive:
+ delta_deaths -= self.reward_death_value * neg_scale
+ delta_ally += prev_health * neg_scale
+ else:
+ # still alive
+ delta_ally += neg_scale * (
+ prev_health - al_unit.health - al_unit.shield
+ )
+
+ for e_id, e_unit in self.enemies.items():
+ if not self.death_tracker_enemy[e_id]:
+ prev_health = (
+ self.previous_enemy_units[e_id].health
+ + self.previous_enemy_units[e_id].shield
+ )
+ if e_unit.health == 0:
+ self.death_tracker_enemy[e_id] = 1
+ delta_deaths += self.reward_death_value
+ delta_enemy += prev_health
+ else:
+ delta_enemy += prev_health - e_unit.health - e_unit.shield
+
+ if self.reward_only_positive:
+ reward = max(delta_enemy + delta_deaths, 0) # shield regeneration
+ else:
+ reward = delta_enemy + delta_deaths - delta_ally
+
+ return reward
+
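
A worked numerical example of the dense reward above with reward_only_positive=True (made-up health values, default reward_death_value=10):

reward_death_value = 10
prev = {0: 45.0, 1: 45.0}    # enemy health + shield at the previous step
curr = {0: 0.0, 1: 30.0}     # enemy 0 just died, enemy 1 took 15 damage

delta_enemy = sum(prev[e] - curr[e] for e in prev)                      # 60.0
delta_deaths = reward_death_value * sum(curr[e] == 0 for e in curr)     # 10
print(max(delta_enemy + delta_deaths, 0))                               # 70.0
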
+ def get_total_actions(self):
+ """Returns the total number of actions an agent could ever take."""
+ return self.n_actions
+
+ @staticmethod
+ def distance(x1, y1, x2, y2):
+ """Distance between two points."""
+ return math.hypot(x2 - x1, y2 - y1)
+
+ def unit_shoot_range(self, agent_id):
+ """Returns the shooting range for an agent."""
+ return 6
+
+ def unit_sight_range(self, agent_id):
+ """Returns the sight range for an agent."""
+ return 9
+
+ def unit_max_cooldown(self, unit):
+ """Returns the maximal cooldown for a unit."""
+ switcher = {
+ self.marine_id: 15,
+ self.marauder_id: 25,
+ self.medivac_id: 200, # max energy
+ self.stalker_id: 35,
+ self.zealot_id: 22,
+ self.colossus_id: 24,
+ self.hydralisk_id: 10,
+ self.zergling_id: 11,
+ self.baneling_id: 1,
+ }
+ return switcher.get(unit.unit_type, 15)
+
+ def save_replay(self):
+ """Save a replay."""
+ prefix = self.replay_prefix or self.map_name
+ replay_dir = self.replay_dir or ""
+ replay_path = self._run_config.save_replay(
+ self._controller.save_replay(),
+ replay_dir=replay_dir,
+ prefix=prefix,
+ )
+ logging.info("Replay saved at: %s" % replay_path)
+
+ def unit_max_shield(self, unit):
+ """Returns maximal shield for a given unit."""
+ if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
+ return 80 # Protoss's Stalker
+ elif unit.unit_type == 73 or unit.unit_type == self.zealot_id:
+ return 50 # Protoss's Zealot
+ elif unit.unit_type == 4 or unit.unit_type == self.colossus_id:
+ return 150 # Protoss's Colossus
+ else:
+ raise Exception("Maximum shield not recognised")
+
+ def build_state_feature_names(self):
+ """Return the state feature names."""
+ if self.obs_instead_of_state:
+ raise NotImplementedError
+
+ feature_names = []
+
+ # Ally features.
+ for al_id in range(self.n_agents):
+ feature_names.append(f"ally_health_{al_id}")
+ feature_names.append(f"ally_cooldown_{al_id}")
+ feature_names.append(f"ally_relative_x_{al_id}")
+ feature_names.append(f"ally_relative_y_{al_id}")
+
+ if self.shield_bits_ally > 0:
+ feature_names.append(f"ally_shield_{al_id}")
+
+ if self.stochastic_attack:
+ feature_names.append(f"ally_attack_prob_{al_id}")
+
+ if self.stochastic_health:
+ feature_names.append(f"ally_health_level_{al_id}")
+
+ if self.conic_fov:
+ feature_names.append(f"ally_fov_x_{al_id}")
+ feature_names.append(f"ally_fov_y_{al_id}")
+
+ if self.unit_type_bits > 0:
+ for bit in range(self.unit_type_bits):
+ feature_names.append(f"ally_unit_type_{al_id}_bit_{bit}")
+
+ # Enemy features.
+ for e_id in range(self.n_enemies):
+ feature_names.append(f"enemy_health_{e_id}")
+ feature_names.append(f"enemy_relative_x_{e_id}")
+ feature_names.append(f"enemy_relative_y_{e_id}")
+
+ if self.shield_bits_enemy > 0:
+ feature_names.append(f"enemy_shield_{e_id}")
+
+ if self.unit_type_bits > 0:
+ for bit in range(self.unit_type_bits):
+ feature_names.append(f"enemy_unit_type_{e_id}_bit_{bit}")
+
+ if self.state_last_action:
+ for al_id in range(self.n_agents):
+ for action_idx in range(self.n_actions):
+ feature_names.append(
+ f"ally_last_action_{al_id}_action_{action_idx}"
+ )
+
+ if self.state_timestep_number:
+ feature_names.append("timestep")
+
+ return feature_names
+
+ def get_state_feature_names(self):
+ return self.state_feature_names
+
+ def build_obs_feature_names(self):
+ """Return the observations feature names."""
+ feature_names = []
+
+ # Movement features.
+ feature_names.extend(
+ [
+ "move_action_north",
+ "move_action_south",
+ "move_action_east",
+ "move_action_west",
+ ]
+ )
+ if self.obs_pathing_grid:
+ feature_names.extend(
+ [f"pathing_grid_{n}" for n in range(self.n_obs_pathing)]
+ )
+ if self.obs_terrain_height:
+ feature_names.extend(
+ [f"terrain_height_{n}" for n in range(self.n_obs_height)]
+ )
+
+ # Enemy features.
+ for e_id in range(self.n_enemies):
+ feature_names.extend(
+ [
+ f"enemy_shootable_{e_id}",
+ f"enemy_distance_{e_id}",
+ f"enemy_relative_x_{e_id}",
+ f"enemy_relative_y_{e_id}",
+ ]
+ )
+ if self.obs_all_health:
+ feature_names.append(f"enemy_health_{e_id}")
+ if self.obs_all_health and self.shield_bits_enemy > 0:
+ feature_names.append(f"enemy_shield_{e_id}")
+ if self.unit_type_bits > 0:
+ feature_names.extend(
+ [
+ f"enemy_unit_type_{e_id}_bit_{bit}"
+ for bit in range(self.unit_type_bits)
+ ]
+ )
+
+ # Ally features.
+ # From the perspective of agent 0.
+ al_ids = [al_id for al_id in range(self.n_agents) if al_id != 0]
+ for al_id in al_ids:
+ feature_names.extend(
+ [
+ f"ally_visible_{al_id}",
+ f"ally_distance_{al_id}",
+ f"ally_relative_x_{al_id}",
+ f"ally_relative_y_{al_id}",
+ ]
+ )
+ if self.obs_all_health:
+ feature_names.append(f"ally_health_{al_id}")
+ if self.shield_bits_ally > 0:
+ feature_names.append(f"ally_shield_{al_id}")
+ if self.stochastic_attack and (
+ self.observe_attack_probs or self.zero_pad_stochastic_attack
+ ):
+ feature_names.append(f"ally_attack_prob_{al_id}")
+ if self.stochastic_health and (
+ self.observe_teammate_health or self.zero_pad_health
+ ):
+ feature_names.append(f"ally_health_level_{al_id}")
+ if self.unit_type_bits > 0 and (
+ (not self.replace_teammates or self.observe_teammate_types)
+ or self.zero_pad_unit_types
+ ):
+ feature_names.extend(
+ [
+ f"ally_unit_type_{al_id}_bit_{bit}"
+ for bit in range(self.unit_type_bits)
+ ]
+ )
+ if self.obs_last_action:
+ feature_names.extend(
+ [
+ f"ally_last_action_{al_id}_action_{action}"
+ for action in range(self.n_actions)
+ ]
+ )
+
+ # Own features.
+ if self.obs_own_health:
+ feature_names.append("own_health")
+ if self.shield_bits_ally > 0:
+ feature_names.append("own_shield")
+ if self.stochastic_attack:
+ feature_names.append("own_attack_prob")
+ if self.stochastic_health:
+ feature_names.append("own_health_level")
+ if self.obs_own_pos:
+ feature_names.extend(["own_pos_x", "own_pos_y"])
+ if self.conic_fov:
+ feature_names.extend(["own_fov_x", "own_fov_y"])
+ if self.unit_type_bits > 0:
+ feature_names.extend(
+ [
+ f"own_unit_type_bit_{bit}"
+ for bit in range(self.unit_type_bits)
+ ]
+ )
+ if not self.obs_starcraft:
+ feature_names = []
+
+ if self.obs_timestep_number:
+ feature_names.append("timestep")
+
+ return feature_names
+
+ def get_obs_feature_names(self):
+ return self.obs_feature_names
+
+ def can_move(self, unit, direction):
+ """Whether a unit can move in a given direction."""
+ m = self._move_amount / 2
+
+ if direction == Direction.NORTH:
+ x, y = int(unit.pos.x), int(unit.pos.y + m)
+ elif direction == Direction.SOUTH:
+ x, y = int(unit.pos.x), int(unit.pos.y - m)
+ elif direction == Direction.EAST:
+ x, y = int(unit.pos.x + m), int(unit.pos.y)
+ else:
+ x, y = int(unit.pos.x - m), int(unit.pos.y)
+
+ if self.check_bounds(x, y) and self.pathing_grid[x, y]:
+ return True
+
+ return False
+
+ def get_surrounding_points(self, unit, include_self=False):
+ """Returns the surrounding points of the unit in 8 directions."""
+ x = int(unit.pos.x)
+ y = int(unit.pos.y)
+
+ ma = self._move_amount
+
+ points = [
+ (x, y + 2 * ma),
+ (x, y - 2 * ma),
+ (x + 2 * ma, y),
+ (x - 2 * ma, y),
+ (x + ma, y + ma),
+ (x - ma, y - ma),
+ (x + ma, y - ma),
+ (x - ma, y + ma),
+ ]
+
+ if include_self:
+ points.append((x, y))
+
+ return points
+
+ def check_bounds(self, x, y):
+ """Whether a point is within the map bounds."""
+ return 0 <= x < self.map_x and 0 <= y < self.map_y
+
+ def get_surrounding_pathing(self, unit):
+ """Returns pathing values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=False)
+ vals = [
+ self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def get_surrounding_height(self, unit):
+ """Returns height values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=True)
+ vals = [
+ self.terrain_height[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def _compute_health(self, agent_id, unit):
+ """Each agent has a health bar with max health
+ `health_max` and current health `health`. We set a level
+ `health_level` between `0` and `1` where the agent dies if its
+ proportional health (`health / health_max`) is below that level.
+ This function rescales health to take into account this death level.
+
+        On the proportional health scale this looks like:
+
+            0 ---------- health_level ---------- proportional_health ---------- 1
+
+        and so we compute
+        (proportional_health - health_level) / (1 - health_level)
+ """
+ proportional_health = unit.health / unit.health_max
+ health_level = self.agent_health_levels[agent_id]
+ return (1.0 / (1 - health_level)) * (
+ proportional_health - health_level
+ )
+
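
A quick numerical check of the rescaling formula in the docstring above:

health_level = 0.3           # agent dies below 30% proportional health
proportional_health = 0.65
print((proportional_health - health_level) / (1 - health_level))   # 0.5
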
+ def render_fovs(self):
+ lines_to_render = []
+ for agent_id in range(self.n_agents):
+ if not self.death_tracker_ally[agent_id]:
+ lines_to_render.extend(self.agent_cone(agent_id))
+ debug_command = d_pb.DebugCommand(
+ draw=d_pb.DebugDraw(lines=lines_to_render)
+ )
+ self._controller.debug(debug_command)
+
+ def agent_cone(self, agent_id):
+ fov_direction = self.fov_directions[agent_id]
+ c, s = np.cos(self.conic_fov_angle / 2), np.sin(
+ self.conic_fov_angle / 2
+ )
+ sight_range = self.unit_sight_range(agent_id)
+ rot = np.array([[c, -s], [s, c]])
+ neg_rot = np.array([[c, s], [-s, c]])
+ start_pos = self.new_unit_positions[agent_id]
+ init_pos = sc_common.Point(
+ x=start_pos[0],
+ y=start_pos[1],
+ z=self.get_unit_by_id(agent_id).pos.z,
+ )
+ upper_cone_end = start_pos + (rot @ fov_direction) * sight_range
+ lower_cone_end = start_pos + (neg_rot @ fov_direction) * sight_range
+ lines = [
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=upper_cone_end[0],
+ y=upper_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=lower_cone_end[0],
+ y=lower_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ ]
+ return lines
+
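
The cone edges drawn above come from rotating the unit's facing direction by plus and minus half the cone angle and scaling by the sight range. In isolation (assuming num_fov_actions=6, i.e. a 60-degree cone, and the default sight range of 9):

import numpy as np

conic_fov_angle = 2 * np.pi / 6
c, s = np.cos(conic_fov_angle / 2), np.sin(conic_fov_angle / 2)
rot = np.array([[c, -s], [s, c]])       # rotate by +angle/2
neg_rot = np.array([[c, s], [-s, c]])   # rotate by -angle/2

facing = np.array([1.0, 0.0])           # unit facing east
sight_range = 9
print(rot @ facing * sight_range)       # upper cone edge endpoint offset
print(neg_rot @ facing * sight_range)   # lower cone edge endpoint offset
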
+ def is_position_in_cone(self, agent_id, pos, range="sight_range"):
+ ally_pos = self.get_unit_by_id(agent_id).pos
+ distance = self.distance(ally_pos.x, ally_pos.y, pos.x, pos.y)
+ # position is in this agent's cone if it is not outside the sight
+ # range and has the correct angle
+ if range == "sight_range":
+ unit_range = self.unit_sight_range(agent_id)
+ elif range == "shoot_range":
+ unit_range = self.unit_shoot_range(agent_id)
+ else:
+ raise Exception("Range argument not recognised")
+ if distance > unit_range:
+ return False
+ x_diff = pos.x - ally_pos.x
+ x_diff = max(x_diff, EPS) if x_diff > 0 else min(x_diff, -EPS)
+ obj_angle = np.arctan((pos.y - ally_pos.y) / x_diff)
+ x = self.fov_directions[agent_id][0]
+ x = max(x, EPS) if x_diff > 0 else min(x, -EPS)
+ fov_angle = np.arctan(self.fov_directions[agent_id][1] / x)
+ return np.abs(obj_angle - fov_angle) < self.conic_fov_angle / 2
+
+ def get_obs_agent(self, agent_id, fully_observable=False):
+ """Returns observation for agent_id. The observation is composed of:
+
+ - agent movement features (where it can move to, height information
+ and pathing grid)
+ - enemy features (available_to_attack, health, relative_x, relative_y,
+ shield, unit_type)
+ - ally features (visible, distance, relative_x, relative_y, shield,
+ unit_type)
+ - agent unit features (health, shield, unit_type)
+
+ All of this information is flattened and concatenated into a list,
+ in the aforementioned order. To know the sizes of each of the
+ features inside the final list of features, take a look at the
+ functions ``get_obs_move_feats_size()``,
+ ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and
+ ``get_obs_own_feats_size()``.
+
+ The size of the observation vector may vary, depending on the
+ environment configuration and type of units present in the map.
+ For instance, non-Protoss units will not have shields, movement
+ features may or may not include terrain height and pathing grid,
+ unit_type is not included if there is only one type of unit in the
+ map, etc.
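+
+ As an illustrative example: with 5 enemies, no shields, a single
+ unit type and `obs_all_health` enabled, each enemy contributes 5
+ features (available_to_attack, distance, relative_x, relative_y,
+ health), so the enemy block alone is 5 x 5 = 25 values.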
+
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+
+ fully_observable: ignores the sight range for this unit.
+ For debugging purposes ONLY -- not a fair observation.
+ """
+ unit = self.get_unit_by_id(agent_id)
+
+ move_feats_dim = self.get_obs_move_feats_size()
+ enemy_feats_dim = self.get_obs_enemy_feats_size()
+ ally_feats_dim = self.get_obs_ally_feats_size()
+ own_feats_dim = self.get_obs_own_feats_size()
+
+ move_feats = np.zeros(move_feats_dim, dtype=np.float32)
+ enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)
+ ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)
+ own_feats = np.zeros(own_feats_dim, dtype=np.float32)
+
+ if (
+ unit.health > 0 and self.obs_starcraft
+ ): # otherwise dead, return all zeros
+ x = unit.pos.x
+ y = unit.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Movement features. Do not need similar for looking
+ # around because this is always possible
+ avail_actions = self.get_avail_agent_actions(agent_id)
+ for m in range(self.n_actions_move):
+ move_feats[m] = avail_actions[m + 2]
+
+ ind = self.n_actions_move
+
+ if self.obs_pathing_grid:
+ move_feats[
+ ind : ind + self.n_obs_pathing # noqa
+ ] = self.get_surrounding_pathing(unit)
+ ind += self.n_obs_pathing
+
+ if self.obs_terrain_height:
+ move_feats[ind:] = self.get_surrounding_height(unit)
+
+ # Enemy features
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+ enemy_visible = (
+ self.is_position_in_cone(agent_id, e_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (enemy_visible and e_unit.health > 0) or (
+ e_unit.health > 0 and fully_observable
+ ): # visible and alive
+ # Sight range > shoot range
+ enemy_feats[e_id, 0] = avail_actions[
+ self.n_actions_no_attack + e_id
+ ] # available
+ enemy_feats[e_id, 1] = dist / sight_range # distance
+ enemy_feats[e_id, 2] = (
+ e_x - x
+ ) / sight_range # relative X
+ enemy_feats[e_id, 3] = (
+ e_y - y
+ ) / sight_range # relative Y
+ show_enemy = (
+ self.mask_enemies
+ and not self.enemy_mask[agent_id][e_id]
+ ) or not self.mask_enemies
+ ind = 4
+ if self.obs_all_health and show_enemy:
+ enemy_feats[e_id, ind] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ ind += 1
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_feats[e_id, ind] = (
+ e_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.unit_type_bits > 0 and show_enemy:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_feats[e_id, ind + type_id] = 1 # unit type
+
+ # Ally features
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id != agent_id
+ ]
+ for i, al_id in enumerate(al_ids):
+
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+ ally_visible = (
+ self.is_position_in_cone(agent_id, al_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (ally_visible and al_unit.health > 0) or (
+ al_unit.health > 0 and fully_observable
+ ): # visible and alive
+ ally_feats[i, 0] = 1 # visible
+ ally_feats[i, 1] = dist / sight_range # distance
+ ally_feats[i, 2] = (al_x - x) / sight_range # relative X
+ ally_feats[i, 3] = (al_y - y) / sight_range # relative Y
+
+ ind = 4
+ if self.obs_all_health:
+ if not self.stochastic_health:
+ ally_feats[i, ind] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ ind += 1
+ elif self.observe_teammate_health:
+ ally_feats[i, ind] = self._compute_health(
+ agent_id=al_id, unit=al_unit
+ )
+ ind += 1
+ elif self.zero_pad_health:
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_feats[i, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+ if self.stochastic_attack and self.observe_attack_probs:
+ ally_feats[i, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ elif (
+ self.stochastic_attack
+ and self.zero_pad_stochastic_attack
+ ):
+ ind += 1
+
+ if self.stochastic_health and self.observe_teammate_health:
+ ally_feats[i, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ elif self.stochastic_health and self.zero_pad_health:
+ ind += 1
+ if self.unit_type_bits > 0 and (
+ not self.replace_teammates
+ or self.observe_teammate_types
+ ):
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_feats[i, ind + type_id] = 1
+ ind += self.unit_type_bits
+ elif self.unit_type_bits > 0 and self.zero_pad_unit_types:
+ ind += self.unit_type_bits
+ if self.obs_last_action:
+ ally_feats[i, ind:] = self.last_action[al_id]
+
+ # Own features
+ ind = 0
+ if self.obs_own_health:
+ if not self.stochastic_health:
+ own_feats[ind] = unit.health / unit.health_max
+ else:
+ own_feats[ind] = self._compute_health(agent_id, unit)
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(unit)
+ own_feats[ind] = unit.shield / max_shield
+ ind += 1
+
+ if self.stochastic_attack:
+ own_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ own_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.obs_own_pos:
+ own_feats[ind] = x / self.map_x
+ own_feats[ind + 1] = y / self.map_y
+ ind += 2
+ if self.conic_fov:
+ own_feats[ind : ind + 2] = self.fov_directions[agent_id]
+ ind += 2
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ own_feats[ind + type_id] = 1
+ if self.obs_starcraft:
+ agent_obs = np.concatenate(
+ (
+ move_feats.flatten(),
+ enemy_feats.flatten(),
+ ally_feats.flatten(),
+ own_feats.flatten(),
+ )
+ )
+
+ if self.obs_timestep_number:
+ if self.obs_starcraft:
+ agent_obs = np.append(
+ agent_obs, self._episode_steps / self.episode_limit
+ )
+ else:
+ agent_obs = np.zeros(1, dtype=np.float32)
+ agent_obs[:] = self._episode_steps / self.episode_limit
+
+ if self.debug:
+ logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
+ logging.debug(
+ "Avail. actions {}".format(
+ self.get_avail_agent_actions(agent_id)
+ )
+ )
+ logging.debug("Move feats {}".format(move_feats))
+ logging.debug("Enemy feats {}".format(enemy_feats))
+ logging.debug("Ally feats {}".format(ally_feats))
+ logging.debug("Own feats {}".format(own_feats))
+
+ return agent_obs
+
+ def get_obs(self):
+ """Returns all agent observations in a list.
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+ """
+ agents_obs = [
+ self.get_obs_agent(i, fully_observable=self.fully_observable)
+ for i in range(self.n_agents)
+ ]
+ return agents_obs
+
+ def get_capabilities_agent(self, agent_id):
+ unit = self.get_unit_by_id(agent_id)
+ cap_feats = np.zeros(self.get_cap_size(), dtype=np.float32)
+
+ ind = 0
+ if self.stochastic_attack:
+ cap_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ cap_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ cap_feats[ind + type_id] = 1
+
+ return cap_feats
+
+ def get_capabilities(self):
+ """Returns all agent capabilities in a list."""
+ agents_cap = [
+ self.get_capabilities_agent(i) for i in range(self.n_agents)
+ ]
+ agents_cap = np.concatenate(agents_cap, axis=0).astype(np.float32)
+ return agents_cap
+
+ def get_state(self):
+ """Returns the global state.
+ NOTE: This function should not be used during decentralised execution.
+ """
+ if self.obs_instead_of_state:
+ obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
+ np.float32
+ )
+ return obs_concat
+
+ state_dict = self.get_state_dict()
+
+ state = np.append(
+ state_dict["allies"].flatten(), state_dict["enemies"].flatten()
+ )
+ if "last_action" in state_dict:
+ state = np.append(state, state_dict["last_action"].flatten())
+ if "timestep" in state_dict:
+ state = np.append(state, state_dict["timestep"])
+
+ state = state.astype(dtype=np.float32)
+
+ if self.debug:
+ logging.debug("STATE".center(60, "-"))
+ logging.debug("Ally state {}".format(state_dict["allies"]))
+ logging.debug("Enemy state {}".format(state_dict["enemies"]))
+ if self.state_last_action:
+ logging.debug("Last actions {}".format(self.last_action))
+
+ return state
+
+ def get_ally_num_attributes(self):
+ return len(self.ally_state_attr_names) + len(
+ self.capability_attr_names
+ )
+
+ def get_enemy_num_attributes(self):
+ return len(self.enemy_state_attr_names)
+
+ def get_state_dict(self):
+ """Returns the global state as a dictionary.
+
+ - allies: numpy array containing agents and their attributes
+ - enemies: numpy array containing enemies and their attributes
+ - last_action: numpy array of previous actions for each agent
+ - timestep: current no. of steps divided by total no. of steps
+
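+ The number of columns per ally row equals
+ len(ally_state_attr_names) + len(capability_attr_names); per enemy
+ row it equals len(enemy_state_attr_names).
+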
+ NOTE: This function should not be used during decentralised execution.
+ """
+
+ # number of features equals the number of attribute names
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ ally_state = np.zeros((self.n_agents, nf_al))
+ enemy_state = np.zeros((self.n_enemies, nf_en))
+
+ center_x = self.map_x / 2
+ center_y = self.map_y / 2
+
+ for al_id, al_unit in self.agents.items():
+ if al_unit.health > 0:
+ x = al_unit.pos.x
+ y = al_unit.pos.y
+ max_cd = self.unit_max_cooldown(al_unit)
+ if not self.stochastic_health:
+ ally_state[al_id, 0] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ else:
+ ally_state[al_id, 0] = self._compute_health(al_id, al_unit)
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and al_unit.unit_type == self.medivac_id
+ ):
+ ally_state[al_id, 1] = al_unit.energy / max_cd # energy
+ else:
+ ally_state[al_id, 1] = (
+ al_unit.weapon_cooldown / max_cd
+ ) # cooldown
+ ally_state[al_id, 2] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ ally_state[al_id, 3] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ ind = 4
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_state[al_id, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.stochastic_attack:
+ ally_state[al_id, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ if self.stochastic_health:
+ ally_state[al_id, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ if self.conic_fov:
+ ally_state[al_id, ind : ind + 2] = self.fov_directions[
+ al_id
+ ]
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_state[al_id, type_id - self.unit_type_bits] = 1
+
+ for e_id, e_unit in self.enemies.items():
+ if e_unit.health > 0:
+ x = e_unit.pos.x
+ y = e_unit.pos.y
+
+ enemy_state[e_id, 0] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ enemy_state[e_id, 1] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ enemy_state[e_id, 2] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_state[e_id, 3] = e_unit.shield / max_shield # shield
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_state[e_id, type_id - self.unit_type_bits] = 1
+
+ state = {"allies": ally_state, "enemies": enemy_state}
+
+ if self.state_last_action:
+ state["last_action"] = self.last_action
+ if self.state_timestep_number:
+ state["timestep"] = self._episode_steps / self.episode_limit
+
+ return state
+
+ def get_obs_enemy_feats_size(self):
+ """Returns the dimensions of the matrix containing enemy features.
+ Size is n_enemies x n_features.
+ """
+ nf_en = 4 + self.unit_type_bits
+
+ if self.obs_all_health:
+ nf_en += 1 + self.shield_bits_enemy
+
+ return self.n_enemies, nf_en
+
+ def get_obs_ally_feats_size(self):
+ """Returns the dimensions of the matrix containing ally features.
+ Size is n_allies x n_features.
+ """
+ nf_al = 4
+ nf_cap = self.get_obs_ally_capability_size()
+
+ if self.obs_all_health:
+ nf_al += 1 + self.shield_bits_ally
+
+ if self.obs_last_action:
+ nf_al += self.n_actions
+
+ return self.n_agents - 1, nf_al + nf_cap
+
+ def get_obs_own_feats_size(self):
+ """
+ Returns the size of the vector containing the agents' own features.
+ """
+ own_feats = self.get_cap_size()
+ if self.obs_own_health and self.obs_starcraft:
+ own_feats += 1 + self.shield_bits_ally
+ if self.conic_fov and self.obs_starcraft:
+ own_feats += 2
+ if self.obs_own_pos and self.obs_starcraft:
+ own_feats += 2
+ return own_feats
+
+ def get_obs_move_feats_size(self):
+ """Returns the size of the vector containing the agents's movement-
+ related features.
+ """
+ move_feats = self.n_actions_move
+ if self.obs_pathing_grid:
+ move_feats += self.n_obs_pathing
+ if self.obs_terrain_height:
+ move_feats += self.n_obs_height
+
+ return move_feats
+
+ def get_obs_ally_capability_size(self):
+ """Returns the size of capabilities observed by teammates."""
+ cap_feats = self.unit_type_bits
+ if self.stochastic_attack and (
+ self.zero_pad_stochastic_attack or self.observe_attack_probs
+ ):
+ cap_feats += 1
+ if self.stochastic_health and (
+ self.observe_teammate_health or self.zero_pad_health
+ ):
+ cap_feats += 1
+
+ return cap_feats
+
+ def get_cap_size(self):
+ """Returns the size of the own capabilities of the agent."""
+ cap_feats = 0
+ if self.stochastic_attack:
+ cap_feats += 1
+ if self.stochastic_health:
+ cap_feats += 1
+ if self.unit_type_bits > 0:
+ cap_feats += self.unit_type_bits
+
+ return cap_feats
+
+ def get_obs_size(self):
+ """Returns the size of the observation."""
+ own_feats = self.get_obs_own_feats_size()
+ move_feats = self.get_obs_move_feats_size()
+
+ n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size()
+ n_allies, n_ally_feats = self.get_obs_ally_feats_size()
+
+ enemy_feats = n_enemies * n_enemy_feats
+ ally_feats = n_allies * n_ally_feats
+ if self.obs_starcraft:
+ return (
+ self.obs_timestep_number
+ + move_feats
+ + enemy_feats
+ + ally_feats
+ + own_feats
+ )
+ else:
+ return 1 if self.obs_timestep_number else 0
+
+ def get_state_size(self):
+ """Returns the size of the global state."""
+ if self.obs_instead_of_state:
+ return self.get_obs_size() * self.n_agents
+
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ enemy_state = self.n_enemies * nf_en
+ ally_state = self.n_agents * nf_al
+
+ size = enemy_state + ally_state
+
+ if self.state_last_action:
+ size += self.n_agents * self.n_actions
+ if self.state_timestep_number:
+ size += 1
+
+ return size
+
+ def get_visibility_matrix(self):
+ """Returns a boolean numpy array of dimensions
+ (n_agents, n_agents + n_enemies) indicating which units
+ are visible to each agent.
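+ Column j < n_agents refers to ally j; column n_agents + e refers to
+ enemy e.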
+ """
+ arr = np.zeros(
+ (self.n_agents, self.n_agents + self.n_enemies),
+ dtype=bool,
+ )
+
+ for agent_id in range(self.n_agents):
+ current_agent = self.get_unit_by_id(agent_id)
+ if current_agent.health > 0: # if agent is not dead
+ x = current_agent.pos.x
+ y = current_agent.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Enemies
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+
+ if dist < sight_range and e_unit.health > 0:
+ # visible and alive
+ arr[agent_id, self.n_agents + e_id] = 1
+
+ # The matrix for allies is filled symmetrically
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id > agent_id
+ ]
+ for _, al_id in enumerate(al_ids):
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+
+ if dist < sight_range and al_unit.health > 0:
+ # visible and alive
+ arr[agent_id, al_id] = arr[al_id, agent_id] = 1
+
+ return arr
+
+ def get_unit_type_id(self, unit, ally):
+ """Returns the ID of unit type in the given scenario."""
+
+ if self.map_type == "protoss_gen":
+ if unit.unit_type in (self.stalker_id, Protoss.Stalker):
+ return 0
+ if unit.unit_type in (self.zealot_id, Protoss.Zealot):
+ return 1
+ if unit.unit_type in (self.colossus_id, Protoss.Colossus):
+ return 2
+ raise AttributeError()
+ if self.map_type == "terran_gen":
+ if unit.unit_type in (self.marine_id, Terran.Marine):
+ return 0
+ if unit.unit_type in (self.marauder_id, Terran.Marauder):
+ return 1
+ if unit.unit_type in (self.medivac_id, Terran.Medivac):
+ return 2
+ raise AttributeError()
+
+ if self.map_type == "zerg_gen":
+ if unit.unit_type in (self.zergling_id, Zerg.Zergling):
+ return 0
+ if unit.unit_type in (self.hydralisk_id, Zerg.Hydralisk):
+ return 1
+ if unit.unit_type in (self.baneling_id, Zerg.Baneling):
+ return 2
+ raise AttributeError()
+
+ # Old stuff
+ if ally: # use new SC2 unit types
+ type_id = unit.unit_type - self._min_unit_type
+
+ if self.map_type == "stalkers_and_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73
+ type_id = unit.unit_type - 73
+ elif self.map_type == "colossi_stalkers_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4
+ if unit.unit_type == 4:
+ type_id = 0
+ elif unit.unit_type == 74:
+ type_id = 1
+ else:
+ type_id = 2
+ elif self.map_type == "bane":
+ if unit.unit_type == 9:
+ type_id = 0
+ else:
+ type_id = 1
+ elif self.map_type == "MMM":
+ if unit.unit_type == 51:
+ type_id = 0
+ elif unit.unit_type == 48:
+ type_id = 1
+ else:
+ type_id = 2
+
+ return type_id
+
+ def get_avail_agent_actions(self, agent_id):
+ """Returns the available actions for agent_id."""
+ unit = self.get_unit_by_id(agent_id)
+ if unit.health > 0:
+ # cannot choose no-op when alive
+ avail_actions = [0] * self.n_actions
+
+ # stop should be allowed
+ avail_actions[1] = 1
+
+ # see if we can move
+ if self.can_move(unit, Direction.NORTH):
+ avail_actions[2] = 1
+ if self.can_move(unit, Direction.SOUTH):
+ avail_actions[3] = 1
+ if self.can_move(unit, Direction.EAST):
+ avail_actions[4] = 1
+ if self.can_move(unit, Direction.WEST):
+ avail_actions[5] = 1
+
+ if self.conic_fov:
+ avail_actions[6 : 6 + self.n_fov_actions] = [
+ 1
+ ] * self.n_fov_actions
+
+ # Can attack only units that are alive and within shooting range
+ shoot_range = self.unit_shoot_range(agent_id)
+
+ target_items = self.enemies.items()
+ if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
+ # Medivacs cannot heal themselves or other flying units
+ target_items = [
+ (t_id, t_unit)
+ for (t_id, t_unit) in self.agents.items()
+ if t_unit.unit_type != self.medivac_id
+ ]
+ # should we only be able to target people in the cone?
+ for t_id, t_unit in target_items:
+ if t_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
+ )
+ can_shoot = (
+ dist <= shoot_range
+ if not self.conic_fov
+ else self.is_position_in_cone(
+ agent_id, t_unit.pos, range="shoot_range"
+ )
+ )
+ if can_shoot:
+ avail_actions[t_id + self.n_actions_no_attack] = 1
+
+ return avail_actions
+
+ else:
+ # only no-op allowed
+ return [1] + [0] * (self.n_actions - 1)
+
+ def get_avail_actions(self):
+ """Returns the available actions of all agents in a list."""
+ avail_actions = []
+ for agent_id in range(self.n_agents):
+ avail_agent = self.get_avail_agent_actions(agent_id)
+ avail_actions.append(avail_agent)
+ return avail_actions
+
+ def close(self):
+ """Close StarCraft II."""
+ if self.renderer is not None:
+ self.renderer.close()
+ self.renderer = None
+ if self._sc2_proc:
+ self._sc2_proc.close()
+
+ def seed(self):
+ """Returns the random seed used by the environment."""
+ return self._seed
+
+ def render(self, mode="human"):
+ if self.renderer is None:
+ from smac.env.starcraft2.render import StarCraft2Renderer
+
+ self.renderer = StarCraft2Renderer(self, mode)
+ assert (
+ mode == self.renderer.mode
+ ), "mode must be consistent across render calls"
+ return self.renderer.render(mode)
+
+ def _kill_units(self, unit_tags):
+ debug_command = [
+ d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=unit_tags))
+ ]
+ self._controller.debug(debug_command)
+
+ def _kill_all_units(self):
+ """Kill all units on the map. Steps controller and so can throw
+ exceptions"""
+ units = [unit.tag for unit in self._obs.observation.raw_data.units]
+ self._kill_units(units)
+ # check the units are dead
+ units = len(self._obs.observation.raw_data.units)
+ while len(self._obs.observation.raw_data.units) > 0:
+ self._controller.step(2)
+ self._obs = self._controller.observe()
+
+ def _create_new_team(self, team, episode_config):
+ # unit_names = {
+ # self.id_to_unit_name_map[unit.unit_type]
+ # for unit in self.agents.values()
+ # }
+ # It's important to set the number of agents and enemies
+ # because we use that to identify whether all the units have
+ # been created successfully
+
+ # TODO hardcoding init location. change this later for new maps
+ if not self.random_start:
+ ally_init_pos = [sc_common.Point2D(x=8, y=16)] * self.n_agents
+ # Spawning location of enemy units
+ enemy_init_pos = [sc_common.Point2D(x=24, y=16)] * self.n_enemies
+ else:
+ ally_init_pos = [
+ sc_common.Point2D(
+ x=self.ally_start_positions[i][0],
+ y=self.ally_start_positions[i][1],
+ )
+ for i in range(self.ally_start_positions.shape[0])
+ ]
+ enemy_init_pos = [
+ sc_common.Point2D(
+ x=self.enemy_start_positions[i][0],
+ y=self.enemy_start_positions[i][1],
+ )
+ for i in range(self.enemy_start_positions.shape[0])
+ ]
+ for unit_id, unit in enumerate(team):
+ unit_type_ally = self._convert_unit_name_to_unit_type(
+ unit, ally=True
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_ally,
+ owner=1,
+ pos=ally_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ unit_type_enemy = self._convert_unit_name_to_unit_type(
+ unit, ally=False
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_enemy,
+ owner=2,
+ pos=enemy_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def _convert_unit_name_to_unit_type(self, unit_name, ally=True):
+ if ally:
+ return self.ally_unit_map[unit_name]
+ else:
+ return self.enemy_unit_map[unit_name]
+
+ def init_units(self, team, episode_config={}):
+ """Initialise the units."""
+ if team:
+ # can use any value for min unit type because
+ # it is hardcoded based on the version
+ self._init_ally_unit_types(0)
+ self._create_new_team(team, episode_config)
+ while True:
+ # Sometimes not all units have yet been created by SC2
+ self.agents = {}
+ self.enemies = {}
+
+ ally_units = [
+ unit
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 1
+ ]
+ ally_units_sorted = sorted(
+ ally_units,
+ key=attrgetter("unit_type", "pos.x", "pos.y"),
+ reverse=False,
+ )
+
+ for i in range(len(ally_units_sorted)):
+ self.agents[i] = ally_units_sorted[i]
+ if self.debug:
+ logging.debug(
+ "Unit {} is {}, x = {}, y = {}".format(
+ len(self.agents),
+ self.agents[i].unit_type,
+ self.agents[i].pos.x,
+ self.agents[i].pos.y,
+ )
+ )
+
+ for unit in self._obs.observation.raw_data.units:
+ if unit.owner == 2:
+ self.enemies[len(self.enemies)] = unit
+ if self._episode_count == 0:
+ self.max_reward += unit.health_max + unit.shield_max
+
+ if self._episode_count == 0 and not team:
+ min_unit_type = min(
+ unit.unit_type for unit in self.agents.values()
+ )
+ self._init_ally_unit_types(min_unit_type)
+
+ all_agents_created = len(self.agents) == self.n_agents
+ all_enemies_created = len(self.enemies) == self.n_enemies
+
+ self._unit_types = [
+ unit.unit_type for unit in ally_units_sorted
+ ] + [
+ unit.unit_type
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 2
+ ]
+
+ # TODO move this to the start
+ if all_agents_created and all_enemies_created: # all good
+ return
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def get_unit_types(self):
+ if self._unit_types is None:
+ warn(
+ "unit types have not been initialized yet, please call"
+ "env.reset() to populate this and call t1286he method again."
+ )
+
+ return self._unit_types
+
+ def update_units(self):
+ """Update units after an environment step.
+ This function assumes that self._obs is up-to-date.
+ """
+ n_ally_alive = 0
+ n_enemy_alive = 0
+
+ # Store previous state
+ self.previous_ally_units = deepcopy(self.agents)
+ self.previous_enemy_units = deepcopy(self.enemies)
+
+ for al_id, al_unit in self.agents.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if al_unit.tag == unit.tag:
+ self.agents[al_id] = unit
+ updated = True
+ n_ally_alive += 1
+ break
+
+ if not updated: # dead
+ al_unit.health = 0
+
+ for e_id, e_unit in self.enemies.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if e_unit.tag == unit.tag:
+ self.enemies[e_id] = unit
+ updated = True
+ n_enemy_alive += 1
+ break
+
+ if not updated: # dead
+ e_unit.health = 0
+
+ if (
+ n_ally_alive == 0
+ and n_enemy_alive > 0
+ or self.only_medivac_left(ally=True)
+ ):
+ return -1 # lost
+ if (
+ n_ally_alive > 0
+ and n_enemy_alive == 0
+ or self.only_medivac_left(ally=False)
+ ):
+ return 1 # won
+ if n_ally_alive == 0 and n_enemy_alive == 0:
+ return 0
+
+ return None
+
+ def _register_unit_mapping(self, unit_name, unit_type_id):
+ self.id_to_unit_name_map[unit_type_id] = unit_name
+ self.unit_name_to_id_map[unit_name] = unit_type_id
+
+ def _init_ally_unit_types(self, min_unit_type):
+ """Initialise ally unit types. Should be called once from the
+ init_units function.
+ """
+
+ self._min_unit_type = min_unit_type
+
+ if "10gen_" in self.map_name:
+ num_rl_units = 9
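+ # This assumes the custom RL unit types occupy the last
+ # num_rl_units entries of the game's unit data, so the smallest
+ # custom id is the total number of unit types minus num_rl_units.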
+ self._min_unit_type = (
+ len(self._controller.data().units) - num_rl_units
+ )
+
+ self.baneling_id = self._min_unit_type
+ self.colossus_id = self._min_unit_type + 1
+ self.hydralisk_id = self._min_unit_type + 2
+ self.marauder_id = self._min_unit_type + 3
+ self.marine_id = self._min_unit_type + 4
+ self.medivac_id = self._min_unit_type + 5
+ self.stalker_id = self._min_unit_type + 6
+ self.zealot_id = self._min_unit_type + 7
+ self.zergling_id = self._min_unit_type + 8
+
+ self.ally_unit_map = {
+ "baneling": self.baneling_id,
+ "colossus": self.colossus_id,
+ "hydralisk": self.hydralisk_id,
+ "marauder": self.marauder_id,
+ "marine": self.marine_id,
+ "medivac": self.medivac_id,
+ "stalker": self.stalker_id,
+ "zealot": self.zealot_id,
+ "zergling": self.zergling_id,
+ }
+ self.enemy_unit_map = {
+ "baneling": Zerg.Baneling,
+ "colossus": Protoss.Colossus,
+ "hydralisk": Zerg.Hydralisk,
+ "marauder": Terran.Marauder,
+ "marine": Terran.Marine,
+ "medivac": Terran.Medivac,
+ "stalker": Protoss.Stalker,
+ "zealot": Protoss.Zealot,
+ "zergling": Zerg.Zergling,
+ }
+
+ else:
+ if self.map_type == "marines":
+ self.marine_id = min_unit_type
+ self._register_unit_mapping("marine", min_unit_type)
+ elif self.map_type == "stalkers_and_zealots":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ self.zealot_id = min_unit_type + 1
+ self._register_unit_mapping("zealot", min_unit_type + 1)
+ elif self.map_type == "colossi_stalkers_zealots":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ self.stalker_id = min_unit_type + 1
+ self._register_unit_mapping("stalker", min_unit_type + 1)
+ self.zealot_id = min_unit_type + 2
+ self._register_unit_mapping("zealot", min_unit_type + 2)
+ elif self.map_type == "MMM":
+ self.marauder_id = min_unit_type
+ self._register_unit_mapping("marauder", min_unit_type)
+ self.marine_id = min_unit_type + 1
+ self._register_unit_mapping("marine", min_unit_type + 1)
+ self.medivac_id = min_unit_type + 2
+ self._register_unit_mapping("medivac", min_unit_type + 2)
+ elif self.map_type == "zealots":
+ self.zealot_id = min_unit_type
+ self._register_unit_mapping("zealot", min_unit_type)
+ elif self.map_type == "hydralisks":
+ self.hydralisk_id = min_unit_type
+ self._register_unit_mapping("hydralisk", min_unit_type)
+ elif self.map_type == "stalkers":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ elif self.map_type == "colossus":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ elif self.map_type == "bane":
+ self.baneling_id = min_unit_type
+ self._register_unit_mapping("baneling", min_unit_type)
+ self.zergling_id = min_unit_type + 1
+ self._register_unit_mapping("zergling", min_unit_type + 1)
+
+ def only_medivac_left(self, ally):
+ """Check if only Medivac units are left."""
+ if self.map_type != "MMM" and self.map_type != "terran_gen":
+ return False
+
+ if ally:
+ units_alive = [
+ a
+ for a in self.agents.values()
+ if (a.health > 0 and a.unit_type != self.medivac_id)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+ else:
+ units_alive = [
+ a
+ for a in self.enemies.values()
+ if (a.health > 0 and a.unit_type != Terran.Medivac)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+
+ def get_unit_by_id(self, a_id):
+ """Get unit by ID."""
+ return self.agents[a_id]
+
+ def get_stats(self):
+ stats = {
+ "battles_won": self.battles_won,
+ "battles_game": self.battles_game,
+ "battles_draw": self.timeouts,
+ "win_rate": self.battles_won / self.battles_game,
+ "timeouts": self.timeouts,
+ "restarts": self.force_restarts,
+ }
+ return stats
+
+ def get_env_info(self):
+ env_info = super().get_env_info()
+ env_info["agent_features"] = (
+ self.ally_state_attr_names + self.capability_attr_names
+ )
+ env_info["enemy_features"] = self.enemy_state_attr_names
+ return env_info
\ No newline at end of file
diff --git a/src/envs/smac_v2/official/smacv2_test.py b/src/envs/smac_v2/official/smacv2_test.py
new file mode 100644
index 0000000..516c022
--- /dev/null
+++ b/src/envs/smac_v2/official/smacv2_test.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+'''
+@Project :API-Network
+@File :smacv2_test.py
+@Author :Hao Xiaotian
+@Date :2022/10/15 18:45
+'''
+
+import sys
+import os
+
+if sys.platform == "linux":
+ os.environ.setdefault("SC2PATH",
+ os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), "3rdparty", "StarCraftII"))
+
+import time
+
+import numpy as np
+from absl import logging
+from wrapper import StarCraftCapabilityEnvWrapper
+
+logging.set_verbosity(logging.DEBUG)
+
+
+def main():
+ distribution_config = {
+ "n_units": 5,
+ "team_gen": {
+ "dist_type": "weighted_teams",
+ "unit_types": ["marine", "marauder", "medivac"],
+ "exception_unit_types": ["medivac"],
+ "weights": [0.45, 0.45, 0.1],
+ "observe": True,
+ },
+ "start_positions": {
+ "dist_type": "surrounded_and_reflect",
+ "p": 0.5,
+ "n_enemies": 5,
+ "map_x": 32,
+ "map_y": 32,
+ },
+ }
+ env = StarCraftCapabilityEnvWrapper(
+ capability_config=distribution_config,
+ map_name="10gen_terran",
+ debug=True,
+ conic_fov=True,
+ obs_own_pos=True,
+ )
+
+ env_info = env.get_env_info()
+
+ n_actions = env_info["n_actions"]
+ n_agents = env_info["n_agents"]
+
+ n_episodes = 10
+
+ print("Training episodes")
+ for e in range(n_episodes):
+ env.reset()
+ terminated = False
+ episode_reward = 0
+
+ while not terminated:
+ obs = env.get_obs()
+ state = env.get_state()
+ # env.render() # Uncomment for rendering
+
+ actions = []
+ for agent_id in range(n_agents):
+ avail_actions = env.get_avail_agent_actions(agent_id)
+ avail_actions_ind = np.nonzero(avail_actions)[0]
+ action = np.random.choice(avail_actions_ind)
+ actions.append(action)
+
+ reward, terminated, _ = env.step(actions)
+ time.sleep(0.15)
+ episode_reward += reward
+ print("Total reward in episode {} = {}".format(e, episode_reward))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/envs/smac_v2/official/starcraft2.py b/src/envs/smac_v2/official/starcraft2.py
new file mode 100644
index 0000000..ed14128
--- /dev/null
+++ b/src/envs/smac_v2/official/starcraft2.py
@@ -0,0 +1,2302 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from envs.multiagentenv import MultiAgentEnv
+from .maps import get_map_params
+
+import atexit
+from warnings import warn
+from operator import attrgetter
+from copy import deepcopy
+import numpy as np
+import enum
+import math
+from absl import logging
+from pysc2.lib.units import Neutral, Protoss, Terran, Zerg
+
+from pysc2 import maps
+from pysc2 import run_configs
+from pysc2.lib import protocol
+
+from s2clientprotocol import common_pb2 as sc_common
+from s2clientprotocol import sc2api_pb2 as sc_pb
+from s2clientprotocol import raw_pb2 as r_pb
+from s2clientprotocol import debug_pb2 as d_pb
+
+races = {
+ "R": sc_common.Random,
+ "P": sc_common.Protoss,
+ "T": sc_common.Terran,
+ "Z": sc_common.Zerg,
+}
+
+difficulties = {
+ "1": sc_pb.VeryEasy,
+ "2": sc_pb.Easy,
+ "3": sc_pb.Medium,
+ "4": sc_pb.MediumHard,
+ "5": sc_pb.Hard,
+ "6": sc_pb.Harder,
+ "7": sc_pb.VeryHard,
+ "8": sc_pb.CheatVision,
+ "9": sc_pb.CheatMoney,
+ "A": sc_pb.CheatInsane,
+}
+
+actions = {
+ "move": 16, # target: PointOrUnit
+ "attack": 23, # target: PointOrUnit
+ "stop": 4, # target: None
+ "heal": 386, # Unit
+}
+
+
+class Direction(enum.IntEnum):
+ NORTH = 0
+ SOUTH = 1
+ EAST = 2
+ WEST = 3
+
+
+EPS = 1e-7
+
+
+class StarCraft2Env(MultiAgentEnv):
+ """The StarCraft II environment for decentralised multi-agent
+ micromanagement scenarios.
+ """
+
+ def __init__(
+ self,
+ map_name="8m",
+ step_mul=8,
+ move_amount=2,
+ difficulty="7",
+ game_version=None,
+ seed=None,
+ continuing_episode=False,
+ obs_all_health=True,
+ obs_own_health=True,
+ obs_last_action=False,
+ obs_pathing_grid=False,
+ obs_terrain_height=False,
+ obs_instead_of_state=False,
+ obs_timestep_number=False,
+ obs_own_pos=False,
+ obs_starcraft=True,
+ conic_fov=False,
+ num_fov_actions=12,
+ state_last_action=True,
+ state_timestep_number=False,
+ reward_sparse=False,
+ reward_only_positive=True,
+ reward_death_value=10,
+ reward_win=200,
+ reward_defeat=0,
+ reward_negative_scale=0.5,
+ reward_scale=True,
+ reward_scale_rate=20,
+ kill_unit_step_mul=2,
+ fully_observable=False,
+ capability_config={},
+ replay_dir="",
+ replay_prefix="",
+ window_size_x=1920,
+ window_size_y=1200,
+ heuristic_ai=False,
+ heuristic_rest=False,
+ debug=False,
+ ):
+ """
+ Create a StarCraft2Env environment.
+
+ Parameters
+ ----------
+ map_name : str, optional
+ The name of the SC2 map to play (default is "8m"). The full list
+ can be found by running bin/map_list.
+ step_mul : int, optional
+ How many game steps per agent step (default is 8). None
+ indicates to use the default map step_mul.
+ move_amount : float, optional
+ How far away units are ordered to move per step (default is 2).
+ difficulty : str, optional
+ The difficulty of built-in computer AI bot (default is "7").
+ game_version : str, optional
+ StarCraft II game version (default is None). None indicates the
+ latest version.
+ seed : int, optional
+ Random seed used during game initialisation. This makes runs reproducible.
+ continuing_episode : bool, optional
+ Whether to consider episodes continuing or finished after time
+ limit is reached (default is False).
+ obs_all_health : bool, optional
+ Agents receive the health of all units (in the sight range) as part
+ of observations (default is True).
+ obs_own_health : bool, optional
+ Agents receive their own health as a part of observations (default
+ is True). This flag is ignored when obs_all_health == True.
+ obs_last_action : bool, optional
+ Agents receive the last actions of all units (in the sight range)
+ as part of observations (default is False).
+ obs_pathing_grid : bool, optional
+ Whether observations include pathing values surrounding the agent
+ (default is False).
+ obs_terrain_height : bool, optional
+ Whether observations include terrain height values surrounding the
+ agent (default is False).
+ obs_instead_of_state : bool, optional
+ Use combination of all agents' observations as the global state
+ (default is False).
+ obs_timestep_number : bool, optional
+ Whether observations include the current timestep of the episode
+ (default is False).
+ state_last_action : bool, optional
+ Include the last actions of all agents as part of the global state
+ (default is True).
+ state_timestep_number : bool, optional
+ Whether the state includes the current timestep of the episode
+ (default is False).
+ reward_sparse : bool, optional
+ Receive 1/-1 reward for winning/losing an episode (default is
+ False). The rest of the reward parameters are ignored if True.
+ reward_only_positive : bool, optional
+ Reward is always positive (default is True).
+ reward_death_value : float, optional
+ The amount of reward received for killing an enemy unit (default
+ is 10). This is also the negative penalty for having an allied unit
+ killed if reward_only_positive == False.
+ reward_win : float, optional
+ The reward for winning in an episode (default is 200).
+ reward_defeat : float, optional
+ The reward for losing in an episode (default is 0). This value
+ should be nonpositive.
+ reward_negative_scale : float, optional
+ Scaling factor for negative rewards (default is 0.5). This
+ parameter is ignored when reward_only_positive == True.
+ reward_scale : bool, optional
+ Whether or not to scale the reward (default is True).
+ reward_scale_rate : float, optional
+ Reward scale rate (default is 20). When reward_scale == True, the
+ reward received by the agents is divided by (max_reward /
+ reward_scale_rate), where max_reward is the maximum possible
+ reward per episode without considering the shield regeneration
+ of Protoss units.
+ replay_dir : str, optional
+ The directory to save replays (default is None). If None, the
+ replay will be saved in Replays directory where StarCraft II is
+ installed.
+ replay_prefix : str, optional
+ The prefix of the replay to be saved (default is None). If None,
+ the name of the map will be used.
+ window_size_x : int, optional
+ The length of StarCraft II window size (default is 1920).
+ window_size_y: int, optional
+ The height of StarCraft II window size (default is 1200).
+ heuristic_ai: bool, optional
+ Whether or not to use a non-learning heuristic AI (default False).
+ heuristic_rest: bool, optional
+ At any moment, restrict the actions of the heuristic AI to be
+ chosen from actions available to RL agents (default is False).
+ Ignored if heuristic_ai == False.
+ debug: bool, optional
+ Log messages about observations, state, actions and rewards for
+ debugging purposes (default is False).
+ """
+ # Map arguments
+ self.map_name = map_name
+ map_params = get_map_params(self.map_name)
+ self.map_params = map_params
+ self.episode_limit = map_params["limit"]
+ self._move_amount = move_amount
+ self._step_mul = step_mul
+ self._kill_unit_step_mul = kill_unit_step_mul
+ self.difficulty = difficulty
+
+ # Observations and state
+ self.obs_own_health = obs_own_health
+ self.obs_all_health = obs_all_health
+ self.obs_instead_of_state = obs_instead_of_state
+ self.obs_last_action = obs_last_action
+ self.obs_pathing_grid = obs_pathing_grid
+ self.obs_terrain_height = obs_terrain_height
+ self.obs_timestep_number = obs_timestep_number
+ self.obs_starcraft = obs_starcraft
+ self.state_last_action = state_last_action
+ self.state_timestep_number = state_timestep_number
+ if self.obs_all_health:
+ self.obs_own_health = True
+ self.n_obs_pathing = 8
+ self.n_obs_height = 9
+
+ # Rewards args
+ self.reward_sparse = reward_sparse
+ self.reward_only_positive = reward_only_positive
+ self.reward_negative_scale = reward_negative_scale
+ self.reward_death_value = reward_death_value
+ self.reward_win = reward_win
+ self.reward_defeat = reward_defeat
+ self.reward_scale = reward_scale
+ self.reward_scale_rate = reward_scale_rate
+
+ # Meta MARL
+ self.capability_config = capability_config
+ self.fully_observable = fully_observable
+ self.stochastic_attack = "attack" in self.capability_config
+ self.stochastic_health = "health" in self.capability_config
+ self.replace_teammates = "team_gen" in self.capability_config
+ self.obs_own_pos = obs_own_pos
+ self.mask_enemies = "enemy_mask" in self.capability_config
+ if self.stochastic_attack:
+ self.zero_pad_stochastic_attack = not self.capability_config[
+ "attack"
+ ]["observe"]
+ self.observe_attack_probs = self.capability_config["attack"][
+ "observe"
+ ]
+ if self.stochastic_health:
+ self.zero_pad_health = not self.capability_config["health"][
+ "observe"
+ ]
+ self.observe_teammate_health = self.capability_config["health"][
+ "observe"
+ ]
+ if self.replace_teammates:
+ self.zero_pad_unit_types = not self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.observe_teammate_types = self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.n_agents = (
+ map_params["n_agents"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.n_enemies = (
+ map_params["n_enemies"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.random_start = "start_positions" in self.capability_config
+ self.conic_fov = conic_fov
+ self.n_fov_actions = num_fov_actions if self.conic_fov else 0
+ self.conic_fov_angle = (
+ (2 * np.pi) / self.n_fov_actions if self.conic_fov else 0
+ )
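+ # With a conic field of view, the n_fov_actions facing directions
+ # are evenly spaced around the circle and each cone spans one full
+ # sector (conic_fov_angle radians in total).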
+ # Other
+ self.game_version = game_version
+ self.continuing_episode = continuing_episode
+ self._seed = seed
+ self.heuristic_ai = heuristic_ai
+ self.heuristic_rest = heuristic_rest
+ self.debug = debug
+ self.window_size = (window_size_x, window_size_y)
+ self.replay_dir = replay_dir
+ self.replay_prefix = replay_prefix
+
+ # Actions
+ self.n_actions_move = 4
+
+ self.n_actions_no_attack = self.n_actions_move + self.n_fov_actions + 2
+ self.n_actions = self.n_actions_no_attack + self.n_enemies
+
+ # Map info
+ self._agent_race = map_params["a_race"]
+ self._bot_race = map_params["b_race"]
+ self.shield_bits_ally = 1 if self._agent_race == "P" else 0
+ self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
+ # NOTE: The map_type (which is used to initialise the unit
+ # type ids), the unit_type_bits and the races are still
+ # properties of the map. This means even the 10gen_{race} maps
+ # are limited to the unit types statically defined in the unit
+ # type id assignment. Lifting this restriction shouldn't be too
+ # much work, it just hasn't been done yet.
+ self.unit_type_bits = map_params["unit_type_bits"]
+ self.map_type = map_params["map_type"]
+ self._unit_types = None
+
+ self.max_reward = (
+ self.n_enemies * self.reward_death_value + self.reward_win
+ )
+
+ # create lists containing the names of attributes returned in states
+ self.ally_state_attr_names = [
+ "health",
+ "energy/cooldown",
+ "rel_x",
+ "rel_y",
+ ]
+ self.enemy_state_attr_names = ["health", "rel_x", "rel_y"]
+
+ if self.shield_bits_ally > 0:
+ self.ally_state_attr_names += ["shield"]
+ if self.shield_bits_enemy > 0:
+ self.enemy_state_attr_names += ["shield"]
+ if self.conic_fov:
+ self.ally_state_attr_names += ["fov_x", "fov_y"]
+
+ self.capability_attr_names = []
+ if "attack" in self.capability_config:
+ self.capability_attr_names += ["attack_probability"]
+ if "health" in self.capability_config:
+ self.capability_attr_names += ["total_health"]
+ if self.unit_type_bits > 0:
+ bit_attr_names = [
+ "type_{}".format(bit) for bit in range(self.unit_type_bits)
+ ]
+ self.capability_attr_names += bit_attr_names
+ self.enemy_state_attr_names += bit_attr_names
+
+ self.agents = {}
+ self.enemies = {}
+ self.unit_name_to_id_map = {}
+ self.id_to_unit_name_map = {}
+ self._episode_count = 0
+ self._episode_steps = 0
+ self._total_steps = 0
+ self._obs = None
+ self.battles_won = 0
+ self.battles_game = 0
+ self.timeouts = 0
+ self.force_restarts = 0
+ self.last_stats = None
+ self.agent_attack_probabilities = np.zeros(self.n_agents)
+ self.agent_health_levels = np.zeros(self.n_agents)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
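+ # Pre-compute unit vectors for the n_fov_actions evenly spaced
+ # facing directions an agent can choose between.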
+ self.canonical_fov_directions = np.array(
+ [
+ (
+ np.cos(2 * np.pi * (i / self.n_fov_actions)),
+ np.sin(2 * np.pi * (i / self.n_fov_actions)),
+ )
+ for i in range(self.n_fov_actions)
+ ]
+ )
+ self.new_unit_positions = np.zeros((self.n_agents, 2))
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+ self.init_positions = np.zeros((self.n_agents, 2))
+ self._min_unit_type = 0
+ self.marine_id = self.marauder_id = self.medivac_id = 0
+ self.hydralisk_id = self.zergling_id = self.baneling_id = 0
+ self.stalker_id = self.colossus_id = self.zealot_id = 0
+ self.max_distance_x = 0
+ self.max_distance_y = 0
+ self.map_x = 0
+ self.map_y = 0
+ self.reward = 0
+ self.renderer = None
+ self.terrain_height = None
+ self.pathing_grid = None
+ self._run_config = None
+ self._sc2_proc = None
+ self._controller = None
+ # Try to avoid leaking SC2 processes on shutdown
+ atexit.register(lambda: self.close())
+
+ def _only_one_meta_marl_flag_on(self):
+ """Function that checks that either all the meta marl flags are off,
+ or at most one has been enabled."""
+ if self.stochastic_attack:
+ return not self.stochastic_health and not self.replace_teammates
+ else:
+ return not self.replace_teammates or not self.stochastic_health
+
+ def _launch(self):
+ """Launch the StarCraft II game."""
+ self._run_config = run_configs.get(version=self.game_version)
+ self.version = self._run_config.version
+ _map = maps.get(self.map_name)
+
+ # Setting up the interface
+ interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
+ self._sc2_proc = self._run_config.start(
+ window_size=self.window_size, want_rgb=False
+ )
+ self._controller = self._sc2_proc.controller
+
+ # Request to create the game
+ create = sc_pb.RequestCreateGame(
+ local_map=sc_pb.LocalMap(
+ map_path=_map.path,
+ map_data=self._run_config.map_data(_map.path),
+ ),
+ realtime=False,
+ random_seed=self._seed,
+ )
+ create.player_setup.add(type=sc_pb.Participant)
+ create.player_setup.add(
+ type=sc_pb.Computer,
+ race=races[self._bot_race],
+ difficulty=difficulties[self.difficulty],
+ )
+ self._controller.create_game(create)
+
+ join = sc_pb.RequestJoinGame(
+ race=races[self._agent_race], options=interface_options
+ )
+ self._controller.join_game(join)
+
+ game_info = self._controller.game_info()
+ map_info = game_info.start_raw
+ self.map_play_area_min = map_info.playable_area.p0
+ self.map_play_area_max = map_info.playable_area.p1
+ self.max_distance_x = (
+ self.map_play_area_max.x - self.map_play_area_min.x
+ )
+ self.max_distance_y = (
+ self.map_play_area_max.y - self.map_play_area_min.y
+ )
+ self.map_x = map_info.map_size.x
+ self.map_y = map_info.map_size.y
+
+ if map_info.pathing_grid.bits_per_pixel == 1:
+ vals = np.array(list(map_info.pathing_grid.data)).reshape(
+ self.map_x, int(self.map_y / 8)
+ )
+ self.pathing_grid = np.transpose(
+ np.array(
+ [
+ [(b >> i) & 1 for b in row for i in range(7, -1, -1)]
+ for row in vals
+ ],
+ dtype=bool,
+ )
+ )
+ else:
+ self.pathing_grid = np.invert(
+ np.flip(
+ np.transpose(
+ np.array(
+ list(map_info.pathing_grid.data), dtype=bool
+ ).reshape(self.map_x, self.map_y)
+ ),
+ axis=1,
+ )
+ )
+
+ self.terrain_height = (
+ np.flip(
+ np.transpose(
+ np.array(list(map_info.terrain_height.data)).reshape(
+ self.map_x, self.map_y
+ )
+ ),
+ 1,
+ )
+ / 255
+ )
+
+ def reset(self, episode_config={}):
+ """Reset the environment. Required after each full episode.
+ Returns initial observations and states.
+ """
+ self._episode_steps = 0
+ self.episode_config = episode_config
+ if self._episode_count == 0:
+ # Launch StarCraft II
+ self._launch()
+ else:
+ self._restart()
+
+ # Information kept for counting the reward
+ self.agent_attack_probabilities = episode_config.get("attack", {}).get(
+ "item", None
+ )
+ self.agent_health_levels = episode_config.get("health", {}).get(
+ "item", None
+ )
+ self.enemy_mask = episode_config.get("enemy_mask", {}).get(
+ "item", None
+ )
+ self.ally_start_positions = episode_config.get(
+ "ally_start_positions", {}
+ ).get("item", None)
+ self.enemy_start_positions = episode_config.get(
+ "enemy_start_positions", {}
+ ).get("item", None)
+ self.mask_enemies = self.enemy_mask is not None
+ team = episode_config.get("team_gen", {}).get("item", None)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.win_counted = False
+ self.defeat_counted = False
+ if self.debug:
+ logging.debug(
+ "Attack Probabilities: {}".format(self.agent_attack_probabilities)
+ )
+ logging.debug("Health Levels: {}".format(self.agent_health_levels))
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+
+ if self.heuristic_ai:
+ self.heuristic_targets = [None] * self.n_agents
+
+ try:
+ self._obs = self._controller.observe()
+ self.init_units(team, episode_config=episode_config)
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ if self.debug:
+ logging.debug(
+ "Started Episode {}".format(self._episode_count).center(
+ 60, "*"
+ )
+ )
+ return self.get_obs(), self.get_state()
+
+ def _restart(self):
+ """Restart the environment by killing all units on the map.
+ There is a trigger in the SC2Map file, which restarts the
+ episode when there are no units left.
+ """
+ try:
+ self._kill_all_units()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ def full_restart(self):
+ """Full restart. Closes the SC2 process and launches a new one."""
+ self._sc2_proc.close()
+ self._launch()
+ self.force_restarts += 1
+
+ def _kill_units_below_health_level(self):
+ units_to_kill = []
+ for al_id, al_unit in self.agents.items():
+ if (
+ al_unit.health / al_unit.health_max
+ < self.agent_health_levels[al_id]
+ ) and not self.death_tracker_ally[al_id]:
+ units_to_kill.append(al_unit.tag)
+ self._kill_units(units_to_kill)
+
+ def step(self, actions):
+ """A single environment step. Returns reward, terminated, info."""
+ actions_int = [int(a) for a in actions]
+
+ self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
+
+ # Collect individual actions
+ sc_actions = []
+ if self.debug:
+ logging.debug("Actions".center(60, "-"))
+
+ for a_id, action in enumerate(actions_int):
+ if not self.heuristic_ai:
+ sc_action = self.get_agent_action(a_id, action)
+ else:
+ sc_action, action_num = self.get_agent_action_heuristic(
+ a_id, action
+ )
+ actions[a_id] = action_num
+ if sc_action:
+ sc_actions.append(sc_action)
+ # Send action request
+ req_actions = sc_pb.RequestAction(actions=sc_actions)
+
+ try:
+ if self.conic_fov:
+ self.render_fovs()
+ self._controller.actions(req_actions)
+ # Make step in SC2, i.e. apply actions
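+ # With stochastic health, the game step is split in two: step most
+ # of the way, kill any ally whose proportional health has fallen
+ # below its sampled death level, then run the remaining
+ # kill_unit_step_mul game steps.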
+ if not self.stochastic_health:
+ self._controller.step(self._step_mul)
+ else:
+ self._controller.step(
+ self._step_mul - self._kill_unit_step_mul
+ )
+ self._kill_units_below_health_level()
+ self._controller.step(self._kill_unit_step_mul)
+ # Observe here so that we know if the episode is over.
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ return 0, True, {}
+
+ self._total_steps += 1
+ self._episode_steps += 1
+
+ # Update units
+ game_end_code = self.update_units()
+
+ terminated = False
+ reward = self.reward_battle()
+ info = {"battle_won": False}
+
+ # count units that are still alive
+ dead_allies, dead_enemies = 0, 0
+ for _al_id, al_unit in self.agents.items():
+ if al_unit.health == 0:
+ dead_allies += 1
+ for _e_id, e_unit in self.enemies.items():
+ if e_unit.health == 0:
+ dead_enemies += 1
+
+ info["dead_allies"] = dead_allies
+ info["dead_enemies"] = dead_enemies
+
+ if game_end_code is not None:
+ # Battle is over
+ terminated = True
+ self.battles_game += 1
+ if game_end_code == 1 and not self.win_counted:
+ self.battles_won += 1
+ self.win_counted = True
+ info["battle_won"] = True
+ if not self.reward_sparse:
+ reward += self.reward_win
+ else:
+ reward = 1
+ elif game_end_code == -1 and not self.defeat_counted:
+ self.defeat_counted = True
+ if not self.reward_sparse:
+ reward += self.reward_defeat
+ else:
+ reward = -1
+
+ elif self._episode_steps >= self.episode_limit:
+ # Episode limit reached
+ terminated = True
+ if self.continuing_episode:
+ info["episode_limit"] = True
+ self.battles_game += 1
+ self.timeouts += 1
+
+ if self.debug:
+ logging.debug("Reward = {}".format(reward).center(60, "-"))
+
+ if terminated:
+ self._episode_count += 1
+
+ if self.reward_scale:
+ reward /= self.max_reward / self.reward_scale_rate
+
+ self.reward = reward
+
+ return reward, terminated, info
+
+ def get_agent_action(self, a_id, action):
+ """Construct the action for agent a_id."""
+ avail_actions = self.get_avail_agent_actions(a_id)
+ assert (
+ avail_actions[action] == 1
+ ), "Agent {} cannot perform action {}".format(a_id, action)
+
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+ x = unit.pos.x
+ y = unit.pos.y
+
+ if action == 0:
+ # no-op (valid only when dead)
+ assert unit.health == 0, "No-op only available for dead agents."
+ if self.debug:
+ logging.debug("Agent {}: Dead".format(a_id))
+ return None
+ elif action == 1:
+ # stop
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["stop"],
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Stop".format(a_id))
+
+ elif action == 2:
+ # move north
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y + self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y + self._move_amount]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move North".format(a_id))
+
+ elif action == 3:
+ # move south
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y - self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y - self._move_amount]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move South".format(a_id))
+
+ elif action == 4:
+ # move east
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x + self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x + self._move_amount, y]
+ )
+
+ if self.debug:
+ logging.debug("Agent {}: Move East".format(a_id))
+
+ elif action == 5:
+ # move west
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x - self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x - self._move_amount, y]
+ )
+ if self.debug:
+ logging.debug("Agent {}: Move West".format(a_id))
+ elif self.conic_fov and action in range(6, 6 + self.n_fov_actions):
+ self.fov_directions[a_id] = self.canonical_fov_directions[
+ action - 6
+ ]
+ cmd = None
+ else:
+ # attack/heal units that are in range
+ target_id = action - self.n_actions_no_attack
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and unit.unit_type == self.medivac_id
+ ):
+ target_unit = self.agents[target_id]
+ action_name = "heal"
+ else:
+ target_unit = self.enemies[target_id]
+ action_name = "attack"
+
+ if self.stochastic_attack:
+ p = np.random.default_rng().uniform()
+ if p > self.agent_attack_probabilities[a_id]:
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s {}, but fails".format(a_id, action_name, target_id)
+ )
+ return None
+ action_id = actions[action_name]
+ target_tag = target_unit.tag
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s unit # {}".format(
+ a_id, action_name, target_id
+ )
+ )
+ if cmd:
+ sc_action = sc_pb.Action(
+ action_raw=r_pb.ActionRaw(unit_command=cmd)
+ )
+ return sc_action
+ return None
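+
+ # Summary of the discrete action encoding handled above (descriptive
+ # only; exact index ranges depend on the configured instance):
+ #
+ #     0                        -> no-op (valid only for dead agents)
+ #     1                        -> stop
+ #     2..5                     -> move north / south / east / west
+ #     6..6+n_fov_actions-1     -> rotate the FOV cone (conic_fov only)
+ #     n_actions_no_attack + k  -> attack enemy k (or heal ally k for Medivacs)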
+
+ def get_agent_action_heuristic(self, a_id, action):
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+
+ target = self.heuristic_targets[a_id]
+ if unit.unit_type == self.medivac_id:
+ if (
+ target is None
+ or self.agents[target].health == 0
+ or self.agents[target].health == self.agents[target].health_max
+ ):
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for al_id, al_unit in self.agents.items():
+ if al_unit.unit_type == self.medivac_id:
+ continue
+ if (
+ al_unit.health != 0
+ and al_unit.health != al_unit.health_max
+ ):
+ dist = self.distance(
+ unit.pos.x,
+ unit.pos.y,
+ al_unit.pos.x,
+ al_unit.pos.y,
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = al_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["heal"]
+ target_tag = self.agents[self.heuristic_targets[a_id]].tag
+ else:
+ if target is None or self.enemies[target].health == 0:
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for e_id, e_unit in self.enemies.items():
+ if (
+ unit.unit_type == self.marauder_id
+ and e_unit.unit_type == self.medivac_id
+ ):
+ continue
+ if e_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, e_unit.pos.x, e_unit.pos.y
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = e_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["attack"]
+ target_tag = self.enemies[self.heuristic_targets[a_id]].tag
+
+ action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack
+
+ # Check if the action is available
+ if (
+ self.heuristic_rest
+ and self.get_avail_agent_actions(a_id)[action_num] == 0
+ ):
+
+ # Move towards the target rather than attacking/healing
+ if unit.unit_type == self.medivac_id:
+ target_unit = self.agents[self.heuristic_targets[a_id]]
+ else:
+ target_unit = self.enemies[self.heuristic_targets[a_id]]
+
+ delta_x = target_unit.pos.x - unit.pos.x
+ delta_y = target_unit.pos.y - unit.pos.y
+
+ if abs(delta_x) > abs(delta_y): # east or west
+ if delta_x > 0: # east
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x + self._move_amount, y=unit.pos.y
+ )
+ action_num = 4
+ else: # west
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x - self._move_amount, y=unit.pos.y
+ )
+ action_num = 5
+ else: # north or south
+ if delta_y > 0: # north
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y + self._move_amount
+ )
+ action_num = 2
+ else: # south
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y - self._move_amount
+ )
+ action_num = 3
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=target_pos,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ else:
+ # Attack/heal the target
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
+ return sc_action, action_num
+
+ def reward_battle(self):
+ """Reward function when self.reward_spare==False.
+ Returns accumulative hit/shield point damage dealt to the enemy
+ + reward_death_value per enemy unit killed, and, in case
+ self.reward_only_positive == False, - (damage dealt to ally units
+ + reward_death_value per ally unit killed) * self.reward_negative_scale
+ """
+ assert (
+ not self.stochastic_health or self.reward_only_positive
+ ), "Different Health Levels are currently only compatible with positive rewards"
+ if self.reward_sparse:
+ return 0
+
+ reward = 0
+ delta_deaths = 0
+ delta_ally = 0
+ delta_enemy = 0
+
+ neg_scale = self.reward_negative_scale
+
+ # update deaths
+ for al_id, al_unit in self.agents.items():
+ if not self.death_tracker_ally[al_id]:
+ # did not die so far
+ prev_health = (
+ self.previous_ally_units[al_id].health
+ + self.previous_ally_units[al_id].shield
+ )
+ if al_unit.health == 0:
+ # just died
+ self.death_tracker_ally[al_id] = 1
+ if not self.reward_only_positive:
+ delta_deaths -= self.reward_death_value * neg_scale
+ delta_ally += prev_health * neg_scale
+ else:
+ # still alive
+ delta_ally += neg_scale * (
+ prev_health - al_unit.health - al_unit.shield
+ )
+
+ for e_id, e_unit in self.enemies.items():
+ if not self.death_tracker_enemy[e_id]:
+ prev_health = (
+ self.previous_enemy_units[e_id].health
+ + self.previous_enemy_units[e_id].shield
+ )
+ if e_unit.health == 0:
+ self.death_tracker_enemy[e_id] = 1
+ delta_deaths += self.reward_death_value
+ delta_enemy += prev_health
+ else:
+ delta_enemy += prev_health - e_unit.health - e_unit.shield
+
+ if self.reward_only_positive:
+ reward = max(delta_enemy + delta_deaths, 0) # shield regeneration
+ else:
+ reward = delta_enemy + delta_deaths - delta_ally
+
+ return reward
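+
+ # Worked example (hedged, with made-up numbers): if a single enemy whose
+ # previous health + shield was 45 dies this step, then delta_enemy = 45
+ # and delta_deaths = reward_death_value (10 by default). If an ally also
+ # lost 10 hit points in the same step:
+ #
+ #     reward = 45 + 10             # = 55, when reward_only_positive
+ #     reward = 45 + 10 - 10 * 0.5  # = 50, otherwise (neg_scale = 0.5)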
+
+ def get_total_actions(self):
+ """Returns the total number of actions an agent could ever take."""
+ return self.n_actions
+
+ @staticmethod
+ def distance(x1, y1, x2, y2):
+ """Distance between two points."""
+ return math.hypot(x2 - x1, y2 - y1)
+
+ def unit_shoot_range(self, agent_id):
+ """Returns the shooting range for an agent."""
+ return 6
+
+ def unit_sight_range(self, agent_id):
+ """Returns the sight range for an agent."""
+ return 9
+
+ def unit_max_cooldown(self, unit):
+ """Returns the maximal cooldown for a unit."""
+ switcher = {
+ self.marine_id: 15,
+ self.marauder_id: 25,
+ self.medivac_id: 200, # max energy
+ self.stalker_id: 35,
+ self.zealot_id: 22,
+ self.colossus_id: 24,
+ self.hydralisk_id: 10,
+ self.zergling_id: 11,
+ self.baneling_id: 1,
+ }
+ return switcher.get(unit.unit_type, 15)
+
+ def save_replay(self):
+ """Save a replay."""
+ prefix = self.replay_prefix or self.map_name
+ replay_dir = self.replay_dir or ""
+ replay_path = self._run_config.save_replay(
+ self._controller.save_replay(),
+ replay_dir=replay_dir,
+ prefix=prefix,
+ )
+ logging.info("Replay saved at: %s" % replay_path)
+
+ def unit_max_shield(self, unit):
+ """Returns maximal shield for a given unit."""
+ if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
+ return 80 # Protoss's Stalker
+ elif unit.unit_type == 73 or unit.unit_type == self.zealot_id:
+ return 50 # Protoss's Zealot
+ elif unit.unit_type == 4 or unit.unit_type == self.colossus_id:
+ return 150 # Protoss's Colossus
+ else:
+ raise Exception("Maximum shield not recognised")
+
+ def can_move(self, unit, direction):
+ """Whether a unit can move in a given direction."""
+ m = self._move_amount / 2
+
+ if direction == Direction.NORTH:
+ x, y = int(unit.pos.x), int(unit.pos.y + m)
+ elif direction == Direction.SOUTH:
+ x, y = int(unit.pos.x), int(unit.pos.y - m)
+ elif direction == Direction.EAST:
+ x, y = int(unit.pos.x + m), int(unit.pos.y)
+ else:
+ x, y = int(unit.pos.x - m), int(unit.pos.y)
+
+ if self.check_bounds(x, y) and self.pathing_grid[x, y]:
+ return True
+
+ return False
+
+ def get_surrounding_points(self, unit, include_self=False):
+ """Returns the surrounding points of the unit in 8 directions."""
+ x = int(unit.pos.x)
+ y = int(unit.pos.y)
+
+ ma = self._move_amount
+
+ points = [
+ (x, y + 2 * ma),
+ (x, y - 2 * ma),
+ (x + 2 * ma, y),
+ (x - 2 * ma, y),
+ (x + ma, y + ma),
+ (x - ma, y - ma),
+ (x + ma, y - ma),
+ (x - ma, y + ma),
+ ]
+
+ if include_self:
+ points.append((x, y))
+
+ return points
+
+ def check_bounds(self, x, y):
+ """Whether a point is within the map bounds."""
+ return 0 <= x < self.map_x and 0 <= y < self.map_y
+
+ def get_surrounding_pathing(self, unit):
+ """Returns pathing values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=False)
+ vals = [
+ self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def get_surrounding_height(self, unit):
+ """Returns height values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=True)
+ vals = [
+ self.terrain_height[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def _compute_health(self, agent_id, unit):
+ """Each agent has a health bar with max health
+ `health_max` and current health `health`. We set a level
+ `health_level` between `0` and `1` where the agent dies if its
+ proportional health (`health / health_max`) is below that level.
+ This function rescales health to take into account this death level.
+
+ In the proportional health scale we have something that looks like this:
+
+ 0 ---------- health_level ---------------- proportional_health ---------- 1
+ And so we compute
+ (proportional_health - health_level) / (1 - health_level)
+ """
+ proportional_health = unit.health / unit.health_max
+ health_level = self.agent_health_levels[agent_id]
+ return (1.0 / (1 - health_level)) * (
+ proportional_health - health_level
+ )
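+
+ # Worked example (hedged, with made-up numbers): for an agent with
+ # health_level = 0.2 whose unit sits at 30% of its raw health,
+ #
+ #     rescaled = (0.30 - 0.2) / (1 - 0.2)  # = 0.125
+ #
+ # so the agent observes 12.5% health, and any unit at or below 20% of
+ # its raw health maps to <= 0, i.e. it is treated as dead.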
+
+ def render_fovs(self):
+ lines_to_render = []
+ for agent_id in range(self.n_agents):
+ if not self.death_tracker_ally[agent_id]:
+ lines_to_render.extend(self.agent_cone(agent_id))
+ debug_command = d_pb.DebugCommand(
+ draw=d_pb.DebugDraw(lines=lines_to_render)
+ )
+ self._controller.debug(debug_command)
+
+ def agent_cone(self, agent_id):
+ fov_direction = self.fov_directions[agent_id]
+ c, s = np.cos(self.conic_fov_angle / 2), np.sin(
+ self.conic_fov_angle / 2
+ )
+ sight_range = self.unit_sight_range(agent_id)
+ rot = np.array([[c, -s], [s, c]]) # Contra Rotate
+ neg_rot = np.array([[c, s], [-s, c]]) # Rotate Clockwise
+ start_pos = self.new_unit_positions[agent_id]
+ init_pos = sc_common.Point(
+ x=start_pos[0],
+ y=start_pos[1],
+ z=self.get_unit_by_id(agent_id).pos.z,
+ )
+ upper_cone_end = start_pos + (rot @ fov_direction) * sight_range
+ lower_cone_end = start_pos + (neg_rot @ fov_direction) * sight_range
+ lines = [
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=upper_cone_end[0],
+ y=upper_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=lower_cone_end[0],
+ y=lower_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ ]
+ return lines
+
+ def is_position_in_cone(self, agent_id, pos, range="sight_range"):
+ ally_pos = self.get_unit_by_id(agent_id).pos
+ distance = self.distance(ally_pos.x, ally_pos.y, pos.x, pos.y)
+ # position is in this agent's cone if it is not outside the sight
+ # range and has the correct angle
+ if range == "sight_range":
+ unit_range = self.unit_sight_range(agent_id)
+ elif range == "shoot_range":
+ unit_range = self.unit_shoot_range(agent_id)
+ else:
+ raise Exception("Range argument not recognised")
+ if distance > unit_range:
+ return False
+ x_diff = pos.x - ally_pos.x
+ x_diff = max(x_diff, EPS) if x_diff > 0 else min(x_diff, -EPS)
+ obj_angle = np.arctan((pos.y - ally_pos.y) / x_diff)
+ x = self.fov_directions[agent_id][0]
+ x = max(x, EPS) if x_diff > 0 else min(x, -EPS)
+ fov_angle = np.arctan(self.fov_directions[agent_id][1] / x)
+ return np.abs(obj_angle - fov_angle) < self.conic_fov_angle / 2
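+
+ # Worked example (hedged, with made-up numbers): with 4 FOV actions the
+ # cone angle is 2*pi/4 = pi/2. For an agent facing east
+ # (fov_direction = (1, 0)) and a target at relative offset (3, 1):
+ #
+ #     obj_angle = np.arctan(1 / 3)             # ~0.32 rad
+ #     fov_angle = np.arctan(0 / 1)             # 0.0 rad
+ #     abs(obj_angle - fov_angle) < np.pi / 4   # True -> inside the cone,
+ #                                              # provided it is also in range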
+
+ def get_obs_agent(self, agent_id, fully_observable=False):
+ """Returns observation for agent_id. The observation is composed of:
+
+ - agent movement features (where it can move to, height information
+ and pathing grid)
+ - enemy features (available_to_attack, distance, relative_x,
+ relative_y, health, shield, unit_type)
+ - ally features (visible, distance, relative_x, relative_y, health,
+ shield, unit_type, last_action)
+ - agent unit features (health, shield, unit_type and, depending on
+ the configuration, capability, position and FOV features)
+
+ All of this information is flattened and concatenated into a list,
+ in the aforementioned order. To know the sizes of each of the
+ features inside the final list of features, take a look at the
+ functions ``get_obs_move_feats_size()``,
+ ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and
+ ``get_obs_own_feats_size()``.
+
+ The size of the observation vector may vary, depending on the
+ environment configuration and type of units present in the map.
+ For instance, non-Protoss units will not have shields, movement
+ features may or may not include terrain height and pathing grid,
+ unit_type is not included if there is only one type of unit in the
+ map, etc.
+
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+
+ fully_observable: ignores the sight range for a particular unit.
+ For debugging purposes ONLY -- not a fair observation.
+ """
+ unit = self.get_unit_by_id(agent_id)
+
+ move_feats_dim = self.get_obs_move_feats_size()
+ enemy_feats_dim = self.get_obs_enemy_feats_size()
+ ally_feats_dim = self.get_obs_ally_feats_size()
+ own_feats_dim = self.get_obs_own_feats_size()
+
+ move_feats = np.zeros(move_feats_dim, dtype=np.float32)
+ enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)
+ ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)
+ own_feats = np.zeros(own_feats_dim, dtype=np.float32)
+
+ if (
+ unit.health > 0 and self.obs_starcraft
+ ): # otherwise dead, return all zeros
+ x = unit.pos.x
+ y = unit.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Movement features. Do not need similar for looking
+ # around because this is always possible
+ avail_actions = self.get_avail_agent_actions(agent_id)
+ for m in range(self.n_actions_move):
+ move_feats[m] = avail_actions[m + 2]
+
+ ind = self.n_actions_move
+
+ if self.obs_pathing_grid:
+ move_feats[
+ ind: ind + self.n_obs_pathing # noqa
+ ] = self.get_surrounding_pathing(unit)
+ ind += self.n_obs_pathing
+
+ if self.obs_terrain_height:
+ move_feats[ind:] = self.get_surrounding_height(unit)
+
+ # Enemy features
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+ enemy_visible = (
+ self.is_position_in_cone(agent_id, e_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (enemy_visible and e_unit.health > 0) or (
+ e_unit.health > 0 and fully_observable
+ ): # visible and alive
+ # Sight range > shoot range
+ enemy_feats[e_id, 0] = avail_actions[
+ self.n_actions_no_attack + e_id
+ ] # available
+ enemy_feats[e_id, 1] = dist / sight_range # distance
+ enemy_feats[e_id, 2] = (
+ e_x - x
+ ) / sight_range # relative X
+ enemy_feats[e_id, 3] = (
+ e_y - y
+ ) / sight_range # relative Y
+ show_enemy = (
+ self.mask_enemies
+ and not self.enemy_mask[agent_id][e_id]
+ ) or not self.mask_enemies
+ ind = 4
+ if self.obs_all_health and show_enemy:
+ enemy_feats[e_id, ind] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ ind += 1
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_feats[e_id, ind] = (
+ e_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.unit_type_bits > 0 and show_enemy:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_feats[e_id, ind + type_id] = 1 # unit type
+
+ # Ally features
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id != agent_id
+ ]
+ for i, al_id in enumerate(al_ids):
+
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+ ally_visible = (
+ self.is_position_in_cone(agent_id, al_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (ally_visible and al_unit.health > 0) or (
+ al_unit.health > 0 and fully_observable
+ ): # visible and alive
+ ally_feats[i, 0] = 1 # visible
+ ally_feats[i, 1] = dist / sight_range # distance
+ ally_feats[i, 2] = (al_x - x) / sight_range # relative X
+ ally_feats[i, 3] = (al_y - y) / sight_range # relative Y
+
+ ind = 4
+ if self.obs_all_health:
+ if not self.stochastic_health:
+ ally_feats[i, ind] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ ind += 1
+ elif self.observe_teammate_health:
+ ally_feats[i, ind] = self._compute_health(
+ agent_id=al_id, unit=al_unit
+ )
+ ind += 1
+ elif self.zero_pad_health:
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_feats[i, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+ if self.stochastic_attack and self.observe_attack_probs:
+ ally_feats[i, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ elif (
+ self.stochastic_attack
+ and self.zero_pad_stochastic_attack
+ ):
+ ind += 1
+
+ if self.stochastic_health and self.observe_teammate_health:
+ ally_feats[i, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ elif self.stochastic_health and self.zero_pad_health:
+ ind += 1
+ if self.unit_type_bits > 0 and (
+ not self.replace_teammates
+ or self.observe_teammate_types
+ ):
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_feats[i, ind + type_id] = 1
+ ind += self.unit_type_bits
+ elif self.unit_type_bits > 0 and self.zero_pad_unit_types:
+ ind += self.unit_type_bits
+ if self.obs_last_action:
+ ally_feats[i, ind:] = self.last_action[al_id]
+
+ # Own features
+ ind = 0
+ if self.obs_own_health:
+ if not self.stochastic_health:
+ own_feats[ind] = unit.health / unit.health_max
+ else:
+ own_feats[ind] = self._compute_health(agent_id, unit)
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(unit)
+ own_feats[ind] = unit.shield / max_shield
+ ind += 1
+
+ if self.stochastic_attack:
+ own_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ own_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.obs_own_pos:
+ own_feats[ind] = x / self.map_x
+ own_feats[ind + 1] = y / self.map_y
+ ind += 2
+ if self.conic_fov:
+ own_feats[ind: ind + 2] = self.fov_directions[agent_id]
+ ind += 2
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ own_feats[ind + type_id] = 1
+ if self.obs_starcraft:
+ agent_obs = np.concatenate(
+ (
+ move_feats.flatten(),
+ enemy_feats.flatten(),
+ ally_feats.flatten(),
+ own_feats.flatten(),
+ )
+ )
+
+ if self.obs_timestep_number:
+ if self.obs_starcraft:
+ agent_obs = np.append(
+ agent_obs, self._episode_steps / self.episode_limit
+ )
+ else:
+ agent_obs = np.zeros(1, dtype=np.float32)
+ agent_obs[:] = self._episode_steps / self.episode_limit
+
+ if self.debug:
+ logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
+ logging.debug(
+ "Avail. actions {}".format(
+ self.get_avail_agent_actions(agent_id)
+ )
+ )
+ logging.debug("Move feats {}".format(move_feats))
+ logging.debug("Enemy feats {}".format(enemy_feats))
+ logging.debug("Ally feats {}".format(ally_feats))
+ logging.debug("Own feats {}".format(own_feats))
+
+ return agent_obs
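+
+ # Hedged usage sketch (`env` and the agent id are assumptions, not part
+ # of this file): with the default obs_starcraft=True, the flattened
+ # per-agent observation always has length get_obs_size(), e.g.
+ #
+ #     obs = env.get_obs_agent(0)
+ #     assert len(obs) == env.get_obs_size()
+ #
+ # with the layout [move_feats | enemy_feats | ally_feats | own_feats]
+ # plus one trailing timestep entry when obs_timestep_number is set.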
+
+ def get_obs(self):
+ """Returns all agent observations in a list.
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+ """
+ agents_obs = [
+ self.get_obs_agent(i, fully_observable=self.fully_observable)
+ for i in range(self.n_agents)
+ ]
+ return agents_obs
+
+ def get_capabilities_agent(self, agent_id):
+ unit = self.get_unit_by_id(agent_id)
+ cap_feats = np.zeros(self.get_cap_size(), dtype=np.float32)
+
+ ind = 0
+ if self.stochastic_attack:
+ cap_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ cap_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ cap_feats[ind + type_id] = 1
+
+ return cap_feats
+
+ def get_capabilities(self):
+ """Returns all agent capabilities in a list."""
+ agents_cap = [
+ self.get_capabilities_agent(i) for i in range(self.n_agents)
+ ]
+ agents_cap = np.concatenate(agents_cap, axis=0).astype(np.float32)
+ return agents_cap
+
+ def get_state(self):
+ """Returns the global state.
+ NOTE: This function should not be used during decentralised execution.
+ """
+ if self.obs_instead_of_state:
+ obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
+ np.float32
+ )
+ return obs_concat
+
+ state_dict = self.get_state_dict()
+
+ state = np.append(
+ state_dict["allies"].flatten(), state_dict["enemies"].flatten()
+ )
+ if "last_action" in state_dict:
+ state = np.append(state, state_dict["last_action"].flatten())
+ if "timestep" in state_dict:
+ state = np.append(state, state_dict["timestep"])
+
+ state = state.astype(dtype=np.float32)
+
+ if self.debug:
+ logging.debug("STATE".center(60, "-"))
+ logging.debug("Ally state {}".format(state_dict["allies"]))
+ logging.debug("Enemy state {}".format(state_dict["enemies"]))
+ if self.state_last_action:
+ logging.debug("Last actions {}".format(self.last_action))
+
+ return state
+
+ def get_ally_num_attributes(self):
+ return len(self.ally_state_attr_names) + len(
+ self.capability_attr_names
+ )
+
+ def get_enemy_num_attributes(self):
+ return len(self.enemy_state_attr_names)
+
+ def get_state_dict(self):
+ """Returns the global state as a dictionary.
+
+ - allies: numpy array containing agents and their attributes
+ - enemies: numpy array containing enemies and their attributes
+ - last_action: numpy array of previous actions for each agent
+ - timestep: current no. of steps divided by total no. of steps
+
+ NOTE: This function should not be used during decentralised execution.
+ """
+
+ # number of features equals the number of attribute names
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ ally_state = np.zeros((self.n_agents, nf_al))
+ enemy_state = np.zeros((self.n_enemies, nf_en))
+
+ center_x = self.map_x / 2
+ center_y = self.map_y / 2
+
+ for al_id, al_unit in self.agents.items():
+ if al_unit.health > 0:
+ x = al_unit.pos.x
+ y = al_unit.pos.y
+ max_cd = self.unit_max_cooldown(al_unit)
+ if not self.stochastic_health:
+ ally_state[al_id, 0] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ else:
+ ally_state[al_id, 0] = self._compute_health(al_id, al_unit)
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and al_unit.unit_type == self.medivac_id
+ ):
+ ally_state[al_id, 1] = al_unit.energy / max_cd # energy
+ else:
+ ally_state[al_id, 1] = (
+ al_unit.weapon_cooldown / max_cd
+ ) # cooldown
+ ally_state[al_id, 2] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ ally_state[al_id, 3] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ ind = 4
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_state[al_id, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.stochastic_attack:
+ ally_state[al_id, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ if self.stochastic_health:
+ ally_state[al_id, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ if self.conic_fov:
+ ally_state[al_id, ind: ind + 2] = self.fov_directions[
+ al_id
+ ]
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_state[al_id, type_id - self.unit_type_bits] = 1
+
+ for e_id, e_unit in self.enemies.items():
+ if e_unit.health > 0:
+ x = e_unit.pos.x
+ y = e_unit.pos.y
+
+ enemy_state[e_id, 0] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ enemy_state[e_id, 1] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ enemy_state[e_id, 2] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_state[e_id, 3] = e_unit.shield / max_shield # shield
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_state[e_id, type_id - self.unit_type_bits] = 1
+
+ state = {"allies": ally_state, "enemies": enemy_state}
+
+ if self.state_last_action:
+ state["last_action"] = self.last_action
+ if self.state_timestep_number:
+ state["timestep"] = self._episode_steps / self.episode_limit
+
+ return state
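+
+ # Layout note (descriptive only): get_state() flattens this dictionary as
+ #
+ #     [allies (n_agents x ally_attrs) | enemies (n_enemies x enemy_attrs)
+ #      | last_action (n_agents x n_actions, if state_last_action)
+ #      | timestep (1 entry, if state_timestep_number)]
+ #
+ # which matches the length reported by get_state_size().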
+
+ def get_obs_enemy_feats_size(self):
+ """Returns the dimensions of the matrix containing enemy features.
+ Size is n_enemies x n_features.
+ """
+ nf_en = 4 + self.unit_type_bits
+
+ if self.obs_all_health:
+ nf_en += 1 + self.shield_bits_enemy
+
+ return self.n_enemies, nf_en
+
+ def get_obs_ally_feats_size(self):
+ """Returns the dimensions of the matrix containing ally features.
+ Size is n_allies x n_features.
+ """
+ nf_al = 4
+ nf_cap = self.get_obs_ally_capability_size()
+
+ if self.obs_all_health:
+ nf_al += 1 + self.shield_bits_ally
+
+ if self.obs_last_action:
+ nf_al += self.n_actions
+
+ return self.n_agents - 1, nf_al + nf_cap
+
+ def get_obs_own_feats_size(self):
+ """
+ Returns the size of the vector containing the agents' own features.
+ """
+ own_feats = self.get_cap_size()
+ if self.obs_own_health and self.obs_starcraft:
+ own_feats += 1 + self.shield_bits_ally
+ if self.conic_fov and self.obs_starcraft:
+ own_feats += 2
+ if self.obs_own_pos and self.obs_starcraft:
+ own_feats += 2
+ return own_feats
+
+ def get_obs_move_feats_size(self):
+ """Returns the size of the vector containing the agents's movement-
+ related features.
+ """
+ move_feats = self.n_actions_move
+ if self.obs_pathing_grid:
+ move_feats += self.n_obs_pathing
+ if self.obs_terrain_height:
+ move_feats += self.n_obs_height
+
+ return move_feats
+
+ def get_obs_ally_capability_size(self):
+ """Returns the size of capabilities observed by teammates."""
+ cap_feats = self.unit_type_bits
+ if self.stochastic_attack and (
+ self.zero_pad_stochastic_attack or self.observe_attack_probs
+ ):
+ cap_feats += 1
+ if self.stochastic_health and (
+ self.observe_teammate_health or self.zero_pad_health
+ ):
+ cap_feats += 1
+
+ return cap_feats
+
+ def get_cap_size(self):
+ """Returns the size of the own capabilities of the agent."""
+ cap_feats = 0
+ if self.stochastic_attack:
+ cap_feats += 1
+ if self.stochastic_health:
+ cap_feats += 1
+ if self.unit_type_bits > 0:
+ cap_feats += self.unit_type_bits
+
+ return cap_feats
+
+ def get_obs_size(self):
+ """Returns the size of the observation."""
+ own_feats = self.get_obs_own_feats_size()
+ move_feats = self.get_obs_move_feats_size()
+
+ n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size()
+ n_allies, n_ally_feats = self.get_obs_ally_feats_size()
+
+ enemy_feats = n_enemies * n_enemy_feats
+ ally_feats = n_allies * n_ally_feats
+ if self.obs_starcraft:
+ return (
+ self.obs_timestep_number
+ + move_feats
+ + enemy_feats
+ + ally_feats
+ + own_feats
+ )
+ else:
+ return 1 if self.obs_timestep_number else 0
+
+ def get_state_size(self):
+ """Returns the size of the global state."""
+ if self.obs_instead_of_state:
+ return self.get_obs_size() * self.n_agents
+
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ enemy_state = self.n_enemies * nf_en
+ ally_state = self.n_agents * nf_al
+
+ size = enemy_state + ally_state
+
+ if self.state_last_action:
+ size += self.n_agents * self.n_actions
+ if self.state_timestep_number:
+ size += 1
+
+ return size
+
+ def get_visibility_matrix(self):
+ """Returns a boolean numpy array of dimensions
+ (n_agents, n_agents + n_enemies) indicating which units
+ are visible to each agent.
+ """
+ arr = np.zeros(
+ (self.n_agents, self.n_agents + self.n_enemies),
+ dtype=bool,
+ )
+
+ for agent_id in range(self.n_agents):
+ current_agent = self.get_unit_by_id(agent_id)
+ if current_agent.health > 0:  # if agent is not dead
+ x = current_agent.pos.x
+ y = current_agent.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Enemies
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+
+ if dist < sight_range and e_unit.health > 0:
+ # visible and alive
+ arr[agent_id, self.n_agents + e_id] = 1
+
+ # The matrix for allies is filled symmetrically
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id > agent_id
+ ]
+ for _, al_id in enumerate(al_ids):
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+
+ if dist < sight_range and al_unit.health > 0:
+ # visible and alive
+ arr[agent_id, al_id] = arr[al_id, agent_id] = 1
+
+ return arr
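+
+ # Hedged usage sketch (`env` is an assumption for the example): columns
+ # [0, n_agents) of the returned matrix refer to allies and columns
+ # [n_agents, n_agents + n_enemies) refer to enemies, e.g.
+ #
+ #     vis = env.get_visibility_matrix()
+ #     allies_seen_by_0 = vis[0, :env.n_agents]
+ #     enemies_seen_by_0 = vis[0, env.n_agents:]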
+
+ def get_unit_type_id(self, unit, ally):
+ """Returns the ID of unit type in the given scenario."""
+
+ if self.map_type == "protoss_gen":
+ if unit.unit_type in (self.stalker_id, Protoss.Stalker):
+ return 0
+ if unit.unit_type in (self.zealot_id, Protoss.Zealot):
+ return 1
+ if unit.unit_type in (self.colossus_id, Protoss.Colossus):
+ return 2
+ raise AttributeError()
+ if self.map_type == "terran_gen":
+ if unit.unit_type in (self.marine_id, Terran.Marine):
+ return 0
+ if unit.unit_type in (self.marauder_id, Terran.Marauder):
+ return 1
+ if unit.unit_type in (self.medivac_id, Terran.Medivac):
+ return 2
+ raise AttributeError()
+
+ if self.map_type == "zerg_gen":
+ if unit.unit_type in (self.zergling_id, Zerg.Zergling):
+ return 0
+ if unit.unit_type in (self.hydralisk_id, Zerg.Hydralisk):
+ return 1
+ if unit.unit_type in (self.baneling_id, Zerg.Baneling):
+ return 2
+ raise AttributeError()
+
+ # Old stuff
+ if ally: # use new SC2 unit types
+ type_id = unit.unit_type - self._min_unit_type
+
+ if self.map_type == "stalkers_and_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73
+ type_id = unit.unit_type - 73
+ elif self.map_type == "colossi_stalkers_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4
+ if unit.unit_type == 4:
+ type_id = 0
+ elif unit.unit_type == 74:
+ type_id = 1
+ else:
+ type_id = 2
+ elif self.map_type == "bane":
+ if unit.unit_type == 9:
+ type_id = 0
+ else:
+ type_id = 1
+ elif self.map_type == "MMM":
+ if unit.unit_type == 51:
+ type_id = 0
+ elif unit.unit_type == 48:
+ type_id = 1
+ else:
+ type_id = 2
+
+ return type_id
+
+ def get_avail_agent_actions(self, agent_id):
+ """Returns the available actions for agent_id."""
+ unit = self.get_unit_by_id(agent_id)
+ if unit.health > 0:
+ # cannot choose no-op when alive
+ avail_actions = [0] * self.n_actions
+
+ # stop should be allowed
+ avail_actions[1] = 1
+
+ # see if we can move
+ if self.can_move(unit, Direction.NORTH):
+ avail_actions[2] = 1
+ if self.can_move(unit, Direction.SOUTH):
+ avail_actions[3] = 1
+ if self.can_move(unit, Direction.EAST):
+ avail_actions[4] = 1
+ if self.can_move(unit, Direction.WEST):
+ avail_actions[5] = 1
+
+ if self.conic_fov:
+ avail_actions[6: 6 + self.n_fov_actions] = [1] * self.n_fov_actions
+
+ # Can only attack units that are alive and within shooting range
+ shoot_range = self.unit_shoot_range(agent_id)
+
+ target_items = self.enemies.items()
+ if self.map_type in ["MMM", "terran_gen"] and unit.unit_type == self.medivac_id:
+ # Medivacs cannot heal themselves or other flying units
+ target_items = [
+ (t_id, t_unit)
+ for (t_id, t_unit) in self.agents.items()
+ if t_unit.unit_type != self.medivac_id
+ ]
+ # should we only be able to target units inside the FOV cone?
+ for t_id, t_unit in target_items:
+ if t_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
+ )
+ can_shoot = (
+ dist <= shoot_range
+ if not self.conic_fov
+ else self.is_position_in_cone(
+ agent_id, t_unit.pos, range="shoot_range"
+ )
+ )
+ if can_shoot:
+ avail_actions[t_id + self.n_actions_no_attack] = 1
+
+ return avail_actions
+
+ else:
+ # only no-op allowed
+ return [1] + [0] * (self.n_actions - 1)
+
+ def get_avail_actions(self):
+ """Returns the available actions of all agents in a list."""
+ avail_actions = []
+ for agent_id in range(self.n_agents):
+ avail_agent = self.get_avail_agent_actions(agent_id)
+ avail_actions.append(avail_agent)
+ return avail_actions
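+
+ # Hedged usage sketch (`env`, `logits` and the greedy policy are
+ # assumptions, not part of this file): a runner would typically mask
+ # invalid actions with this matrix before selecting actions, e.g.
+ #
+ #     avail = np.array(env.get_avail_actions())   # (n_agents, n_actions)
+ #     masked = np.where(avail == 1, logits, -1e9)
+ #     chosen = masked.argmax(axis=-1)             # one valid action per agent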
+
+ def close(self):
+ """Close StarCraft II."""
+ if self.renderer is not None:
+ self.renderer.close()
+ self.renderer = None
+ if self._sc2_proc:
+ self._sc2_proc.close()
+
+ def seed(self):
+ """Returns the random seed used by the environment."""
+ return self._seed
+
+ def render(self, mode="human"):
+ if self.renderer is None:
+ from smac.env.starcraft2.render import StarCraft2Renderer
+
+ self.renderer = StarCraft2Renderer(self, mode)
+ assert (
+ mode == self.renderer.mode
+ ), "mode must be consistent across render calls"
+ return self.renderer.render(mode)
+
+ def _kill_units(self, unit_tags):
+ debug_command = [
+ d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=unit_tags))
+ ]
+ self._controller.debug(debug_command)
+
+ def _kill_all_units(self):
+ """Kill all units on the map. Steps controller and so can throw
+ exceptions"""
+ units = [unit.tag for unit in self._obs.observation.raw_data.units]
+ self._kill_units(units)
+ # check the units are dead
+ units = len(self._obs.observation.raw_data.units)
+ while len(self._obs.observation.raw_data.units) > 0:
+ self._controller.step(2)
+ self._obs = self._controller.observe()
+
+ def _create_new_team(self, team, episode_config):
+ # unit_names = {
+ # self.id_to_unit_name_map[unit.unit_type]
+ # for unit in self.agents.values()
+ # }
+ # It's important to set the number of agents and enemies
+ # because we use that to identify whether all the units have
+ # been created successfully
+
+ # TODO hardcoding init location. change this later for new maps
+ if not self.random_start:
+ ally_init_pos = [sc_common.Point2D(x=8, y=16)] * self.n_agents
+ # Spawning location of enemy units
+ enemy_init_pos = [sc_common.Point2D(x=24, y=16)] * self.n_enemies
+ else:
+ ally_init_pos = [
+ sc_common.Point2D(
+ x=self.ally_start_positions[i][0],
+ y=self.ally_start_positions[i][1],
+ )
+ for i in range(self.ally_start_positions.shape[0])
+ ]
+ enemy_init_pos = [
+ sc_common.Point2D(
+ x=self.enemy_start_positions[i][0],
+ y=self.enemy_start_positions[i][1],
+ )
+ for i in range(self.enemy_start_positions.shape[0])
+ ]
+ for unit_id, unit in enumerate(team):
+ unit_type_ally = self._convert_unit_name_to_unit_type(
+ unit, ally=True
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_ally,
+ owner=1,
+ pos=ally_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ unit_type_enemy = self._convert_unit_name_to_unit_type(
+ unit, ally=False
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_enemy,
+ owner=2,
+ pos=enemy_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def _convert_unit_name_to_unit_type(self, unit_name, ally=True):
+ if ally:
+ return self.ally_unit_map[unit_name]
+ else:
+ return self.enemy_unit_map[unit_name]
+
+ def init_units(self, team, episode_config={}):
+ """Initialise the units."""
+ if team:
+ # can use any value for min unit type because
+ # it is hardcoded based on the version
+ self._init_ally_unit_types(0)
+ self._create_new_team(team, episode_config)
+ while True:
+ # Sometimes not all units have yet been created by SC2
+ self.agents = {}
+ self.enemies = {}
+
+ ally_units = [
+ unit
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 1
+ ]
+ ally_units_sorted = sorted(
+ ally_units,
+ key=attrgetter("unit_type", "pos.x", "pos.y"),
+ reverse=False,
+ )
+
+ for i in range(len(ally_units_sorted)):
+ self.agents[i] = ally_units_sorted[i]
+ if self.debug:
+ logging.debug(
+ "Unit {} is {}, x = {}, y = {}".format(
+ len(self.agents),
+ self.agents[i].unit_type,
+ self.agents[i].pos.x,
+ self.agents[i].pos.y,
+ )
+ )
+
+ for unit in self._obs.observation.raw_data.units:
+ if unit.owner == 2:
+ self.enemies[len(self.enemies)] = unit
+ if self._episode_count == 0:
+ self.max_reward += unit.health_max + unit.shield_max
+
+ if self._episode_count == 0 and not team:
+ min_unit_type = min(
+ unit.unit_type for unit in self.agents.values()
+ )
+ self._init_ally_unit_types(min_unit_type)
+
+ all_agents_created = len(self.agents) == self.n_agents
+ all_enemies_created = len(self.enemies) == self.n_enemies
+
+ self._unit_types = [
+ unit.unit_type for unit in ally_units_sorted
+ ] + [
+ unit.unit_type
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 2
+ ]
+
+ # TODO move this to the start
+ if all_agents_created and all_enemies_created: # all good
+ return
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def get_unit_types(self):
+ if self._unit_types is None:
+ warn(
+ "unit types have not been initialized yet, please call"
+ "env.reset() to populate this and call t1286he method again."
+ )
+
+ return self._unit_types
+
+ def update_units(self):
+ """Update units after an environment step.
+ This function assumes that self._obs is up-to-date.
+ """
+ n_ally_alive = 0
+ n_enemy_alive = 0
+
+ # Store previous state
+ self.previous_ally_units = deepcopy(self.agents)
+ self.previous_enemy_units = deepcopy(self.enemies)
+
+ for al_id, al_unit in self.agents.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if al_unit.tag == unit.tag:
+ self.agents[al_id] = unit
+ updated = True
+ n_ally_alive += 1
+ break
+
+ if not updated: # dead
+ al_unit.health = 0
+
+ for e_id, e_unit in self.enemies.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if e_unit.tag == unit.tag:
+ self.enemies[e_id] = unit
+ updated = True
+ n_enemy_alive += 1
+ break
+
+ if not updated: # dead
+ e_unit.health = 0
+
+ if (
+ n_ally_alive == 0
+ and n_enemy_alive > 0
+ or self.only_medivac_left(ally=True)
+ ):
+ return -1 # lost
+ if (
+ n_ally_alive > 0
+ and n_enemy_alive == 0
+ or self.only_medivac_left(ally=False)
+ ):
+ return 1 # won
+ if n_ally_alive == 0 and n_enemy_alive == 0:
+ return 0
+
+ return None
+
+ def _register_unit_mapping(self, unit_name, unit_type_id):
+ self.id_to_unit_name_map[unit_type_id] = unit_name
+ self.unit_name_to_id_map[unit_name] = unit_type_id
+
+ def _init_ally_unit_types(self, min_unit_type):
+ """Initialise ally unit types. Should be called once from the
+ init_units function.
+ """
+
+ self._min_unit_type = min_unit_type
+
+ if "10gen_" in self.map_name:
+ num_rl_units = 9
+ self._min_unit_type = (
+ len(self._controller.data().units) - num_rl_units
+ )
+
+ self.baneling_id = self._min_unit_type
+ self.colossus_id = self._min_unit_type + 1
+ self.hydralisk_id = self._min_unit_type + 2
+ self.marauder_id = self._min_unit_type + 3
+ self.marine_id = self._min_unit_type + 4
+ self.medivac_id = self._min_unit_type + 5
+ self.stalker_id = self._min_unit_type + 6
+ self.zealot_id = self._min_unit_type + 7
+ self.zergling_id = self._min_unit_type + 8
+
+ self.ally_unit_map = {
+ "baneling": self.baneling_id,
+ "colossus": self.colossus_id,
+ "hydralisk": self.hydralisk_id,
+ "marauder": self.marauder_id,
+ "marine": self.marine_id,
+ "medivac": self.medivac_id,
+ "stalker": self.stalker_id,
+ "zealot": self.zealot_id,
+ "zergling": self.zergling_id,
+ }
+ self.enemy_unit_map = {
+ "baneling": Zerg.Baneling,
+ "colossus": Protoss.Colossus,
+ "hydralisk": Zerg.Hydralisk,
+ "marauder": Terran.Marauder,
+ "marine": Terran.Marine,
+ "medivac": Terran.Medivac,
+ "stalker": Protoss.Stalker,
+ "zealot": Protoss.Zealot,
+ "zergling": Zerg.Zergling,
+ }
+
+ else:
+ if self.map_type == "marines":
+ self.marine_id = min_unit_type
+ self._register_unit_mapping("marine", min_unit_type)
+ elif self.map_type == "stalkers_and_zealots":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ self.zealot_id = min_unit_type + 1
+ self._register_unit_mapping("zealot", min_unit_type + 1)
+ elif self.map_type == "colossi_stalkers_zealots":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ self.stalker_id = min_unit_type + 1
+ self._register_unit_mapping("stalker", min_unit_type + 1)
+ self.zealot_id = min_unit_type + 2
+ self._register_unit_mapping("zealot", min_unit_type + 2)
+ elif self.map_type == "MMM":
+ self.marauder_id = min_unit_type
+ self._register_unit_mapping("marauder", min_unit_type)
+ self.marine_id = min_unit_type + 1
+ self._register_unit_mapping("marine", min_unit_type + 1)
+ self.medivac_id = min_unit_type + 2
+ self._register_unit_mapping("medivac", min_unit_type + 2)
+ elif self.map_type == "zealots":
+ self.zealot_id = min_unit_type
+ self._register_unit_mapping("zealot", min_unit_type)
+ elif self.map_type == "hydralisks":
+ self.hydralisk_id = min_unit_type
+ self._register_unit_mapping("hydralisk", min_unit_type)
+ elif self.map_type == "stalkers":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ elif self.map_type == "colossus":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ elif self.map_type == "bane":
+ self.baneling_id = min_unit_type
+ self._register_unit_mapping("baneling", min_unit_type)
+ self.zergling_id = min_unit_type + 1
+ self._register_unit_mapping("zergling", min_unit_type + 1)
+
+ def only_medivac_left(self, ally):
+ """Check if only Medivac units are left."""
+ if self.map_type != "MMM" and self.map_type != "terran_gen":
+ return False
+
+ if ally:
+ units_alive = [
+ a
+ for a in self.agents.values()
+ if (a.health > 0 and a.unit_type != self.medivac_id)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+ else:
+ units_alive = [
+ a
+ for a in self.enemies.values()
+ if (a.health > 0 and a.unit_type != Terran.Medivac)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+
+ def get_unit_by_id(self, a_id):
+ """Get unit by ID."""
+ return self.agents[a_id]
+
+ def get_stats(self):
+ stats = {
+ "battles_won": self.battles_won,
+ "battles_game": self.battles_game,
+ "battles_draw": self.timeouts,
+ "win_rate": self.battles_won / self.battles_game,
+ "timeouts": self.timeouts,
+ "restarts": self.force_restarts,
+ }
+ return stats
+
+ def get_env_info(self):
+ env_info = super().get_env_info()
+ env_info["agent_features"] = (
+ self.ally_state_attr_names + self.capability_attr_names
+ )
+ env_info["enemy_features"] = self.enemy_state_attr_names
+ return env_info
diff --git a/src/envs/smac_v2/official/starcraft2_hxt.py b/src/envs/smac_v2/official/starcraft2_hxt.py
new file mode 100644
index 0000000..6bc6423
--- /dev/null
+++ b/src/envs/smac_v2/official/starcraft2_hxt.py
@@ -0,0 +1,2303 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from envs.multiagentenv import MultiAgentEnv
+from .maps import get_map_params
+
+import atexit
+from warnings import warn
+from operator import attrgetter
+from copy import deepcopy
+import numpy as np
+import enum
+import math
+from absl import logging
+from pysc2.lib.units import Neutral, Protoss, Terran, Zerg
+
+from pysc2 import maps
+from pysc2 import run_configs
+from pysc2.lib import protocol
+
+from s2clientprotocol import common_pb2 as sc_common
+from s2clientprotocol import sc2api_pb2 as sc_pb
+from s2clientprotocol import raw_pb2 as r_pb
+from s2clientprotocol import debug_pb2 as d_pb
+
+races = {
+ "R": sc_common.Random,
+ "P": sc_common.Protoss,
+ "T": sc_common.Terran,
+ "Z": sc_common.Zerg,
+}
+
+difficulties = {
+ "1": sc_pb.VeryEasy,
+ "2": sc_pb.Easy,
+ "3": sc_pb.Medium,
+ "4": sc_pb.MediumHard,
+ "5": sc_pb.Hard,
+ "6": sc_pb.Harder,
+ "7": sc_pb.VeryHard,
+ "8": sc_pb.CheatVision,
+ "9": sc_pb.CheatMoney,
+ "A": sc_pb.CheatInsane,
+}
+
+actions = {
+ "move": 16, # target: PointOrUnit
+ "attack": 23, # target: PointOrUnit
+ "stop": 4, # target: None
+ "heal": 386, # Unit
+}
+
+
+class Direction(enum.IntEnum):
+ NORTH = 0
+ SOUTH = 1
+ EAST = 2
+ WEST = 3
+
+
+EPS = 1e-7
+
+
+class StarCraft2Env(MultiAgentEnv):
+ """The StarCraft II environment for decentralised multi-agent
+ micromanagement scenarios.
+ """
+
+ def __init__(
+ self,
+ map_name="8m",
+ step_mul=8,
+ move_amount=2,
+ difficulty="7",
+ game_version=None,
+ seed=None,
+ continuing_episode=False,
+ obs_all_health=True,
+ obs_own_health=True,
+ obs_last_action=False,
+ obs_pathing_grid=False,
+ obs_terrain_height=False,
+ obs_instead_of_state=False,
+ obs_timestep_number=False,
+ obs_own_pos=False,
+ obs_starcraft=True,
+ conic_fov=False,
+ num_fov_actions=4,
+ state_last_action=True,
+ state_timestep_number=False,
+ reward_sparse=False,
+ reward_only_positive=True,
+ reward_death_value=10,
+ reward_win=200,
+ reward_defeat=0,
+ reward_negative_scale=0.5,
+ reward_scale=True,
+ reward_scale_rate=20,
+ kill_unit_step_mul=2,
+ fully_observable=False,
+ capability_config={},
+ replay_dir="",
+ replay_prefix="",
+ window_size_x=1920,
+ window_size_y=1200,
+ heuristic_ai=False,
+ heuristic_rest=False,
+ debug=False,
+ ):
+ """
+ Create a StarCraft2Env environment.
+
+ Parameters
+ ----------
+ map_name : str, optional
+ The name of the SC2 map to play (default is "8m"). The full list
+ can be found by running bin/map_list.
+ step_mul : int, optional
+ How many game steps per agent step (default is 8). None
+ indicates to use the default map step_mul.
+ move_amount : float, optional
+ How far away units are ordered to move per step (default is 2).
+ difficulty : str, optional
+ The difficulty of built-in computer AI bot (default is "7").
+ game_version : str, optional
+ StarCraft II game version (default is None). None indicates the
+ latest version.
+ seed : int, optional
+ Random seed used during game initialisation.
+ continuing_episode : bool, optional
+ Whether to consider episodes continuing or finished after time
+ limit is reached (default is False).
+ obs_all_health : bool, optional
+ Agents receive the health of all units (in the sight range) as part
+ of observations (default is True).
+ obs_own_health : bool, optional
+ Agents receive their own health as a part of observations (default
+ is True). This flag is ignored when obs_all_health == True.
+ obs_last_action : bool, optional
+ Agents receive the last actions of all units (in the sight range)
+ as part of observations (default is False).
+ obs_pathing_grid : bool, optional
+ Whether observations include pathing values surrounding the agent
+ (default is False).
+ obs_terrain_height : bool, optional
+ Whether observations include terrain height values surrounding the
+ agent (default is False).
+ obs_instead_of_state : bool, optional
+ Use combination of all agents' observations as the global state
+ (default is False).
+ obs_timestep_number : bool, optional
+ Whether observations include the current timestep of the episode
+ (default is False).
+ state_last_action : bool, optional
+ Include the last actions of all agents as part of the global state
+ (default is True).
+ state_timestep_number : bool, optional
+ Whether the state includes the current timestep of the episode
+ (default is False).
+ reward_sparse : bool, optional
+ Receive 1/-1 reward for winning/losing an episode (default is
+ False). The rest of the reward parameters are ignored if True.
+ reward_only_positive : bool, optional
+ Reward is always positive (default is True).
+ reward_death_value : float, optional
+ The amount of reward received for killing an enemy unit (default
+ is 10). This is also the negative penalty for having an allied unit
+ killed if reward_only_positive == False.
+ reward_win : float, optional
+ The reward for winning in an episode (default is 200).
+ reward_defeat : float, optional
+ The reward for losing an episode (default is 0). This value
+ should be nonpositive.
+ reward_negative_scale : float, optional
+ Scaling factor for negative rewards (default is 0.5). This
+ parameter is ignored when reward_only_positive == True.
+ reward_scale : bool, optional
+ Whether or not to scale the reward (default is True).
+ reward_scale_rate : float, optional
+ Reward scale rate (default is 20). When reward_scale == True, the
+ reward received by the agents is divided by (max_reward /
+ reward_scale_rate), where max_reward is the maximum possible
+ reward per episode without considering the shield regeneration
+ of Protoss units.
+ replay_dir : str, optional
+ The directory to save replays (default is None). If None, the
+ replay will be saved in Replays directory where StarCraft II is
+ installed.
+ replay_prefix : str, optional
+ The prefix of the replay to be saved (default is None). If None,
+ the name of the map will be used.
+ window_size_x : int, optional
+ The width of the StarCraft II window (default is 1920).
+ window_size_y: int, optional
+ The height of the StarCraft II window (default is 1200).
+ heuristic_ai: bool, optional
+ Whether or not to use a non-learning heuristic AI (default False).
+ heuristic_rest: bool, optional
+ At any moment, restrict the actions of the heuristic AI to be
+ chosen from actions available to RL agents (default is False).
+ Ignored if heuristic_ai == False.
+ debug: bool, optional
+ Log messages about observations, state, actions and rewards for
+ debugging purposes (default is False).
+ """
+ # Map arguments
+ self.map_name = map_name
+ map_params = get_map_params(self.map_name)
+ self.map_params = map_params
+ self.episode_limit = map_params["limit"]
+ self._move_amount = move_amount
+ self._step_mul = step_mul
+ self._kill_unit_step_mul = kill_unit_step_mul
+ self.difficulty = difficulty
+
+ # Observations and state
+ self.obs_own_health = obs_own_health
+ self.obs_all_health = obs_all_health
+ self.obs_instead_of_state = obs_instead_of_state
+ self.obs_last_action = obs_last_action
+ self.obs_pathing_grid = obs_pathing_grid
+ self.obs_terrain_height = obs_terrain_height
+ self.obs_timestep_number = obs_timestep_number
+ self.obs_starcraft = obs_starcraft
+ self.state_last_action = state_last_action
+ self.state_timestep_number = state_timestep_number
+ if self.obs_all_health:
+ self.obs_own_health = True
+ self.n_obs_pathing = 8
+ self.n_obs_height = 9
+
+ # Rewards args
+ self.reward_sparse = reward_sparse
+ self.reward_only_positive = reward_only_positive
+ self.reward_negative_scale = reward_negative_scale
+ self.reward_death_value = reward_death_value
+ self.reward_win = reward_win
+ self.reward_defeat = reward_defeat
+ self.reward_scale = reward_scale
+ self.reward_scale_rate = reward_scale_rate
+
+ # Meta MARL
+ self.capability_config = capability_config
+ self.fully_observable = fully_observable
+ self.stochastic_attack = "attack" in self.capability_config
+ self.stochastic_health = "health" in self.capability_config
+ self.replace_teammates = "team_gen" in self.capability_config
+ self.obs_own_pos = obs_own_pos
+ self.mask_enemies = "enemy_mask" in self.capability_config
+ if self.stochastic_attack:
+ self.zero_pad_stochastic_attack = not self.capability_config[
+ "attack"
+ ]["observe"]
+ self.observe_attack_probs = self.capability_config["attack"][
+ "observe"
+ ]
+ if self.stochastic_health:
+ self.zero_pad_health = not self.capability_config["health"][
+ "observe"
+ ]
+ self.observe_teammate_health = self.capability_config["health"][
+ "observe"
+ ]
+ if self.replace_teammates:
+ self.zero_pad_unit_types = not self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.observe_teammate_types = self.capability_config["team_gen"][
+ "observe"
+ ]
+ self.n_agents = (
+ map_params["n_agents"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.n_enemies = (
+ map_params["n_enemies"]
+ if not self.replace_teammates
+ else self.capability_config["team_gen"]["n_units"]
+ )
+ self.random_start = "start_positions" in self.capability_config
+ self.conic_fov = conic_fov
+ if self.conic_fov:
+ num_fov_actions = 4
+ self.n_fov_actions = num_fov_actions if self.conic_fov else 0
+ self.conic_fov_angle = (
+ (2 * np.pi) / self.n_fov_actions if self.conic_fov else 0
+ )
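+
+ # Hedged example of the capability_config shape this block expects,
+ # inferred from the keys read above (values are illustrative only;
+ # any subset of keys may be present and a key's presence switches the
+ # corresponding feature on):
+ #
+ #     capability_config = {
+ #         "attack": {"observe": True},
+ #         "health": {"observe": False},
+ #         "team_gen": {"observe": True, "n_units": 5},
+ #         "start_positions": {...},
+ #         "enemy_mask": {...},
+ #     }
+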
+ # Other
+ self.game_version = game_version
+ self.continuing_episode = continuing_episode
+ self._seed = seed
+ self.heuristic_ai = heuristic_ai
+ self.heuristic_rest = heuristic_rest
+ self.debug = debug
+ self.window_size = (window_size_x, window_size_y)
+ self.replay_dir = replay_dir
+ self.replay_prefix = replay_prefix
+
+ # Actions
+ self.n_actions_move = 4
+
+ self.n_actions_no_attack = self.n_actions_move + 2
+ self.n_actions = self.n_actions_no_attack + self.n_enemies
+
+ # Map info
+ self._agent_race = map_params["a_race"]
+ self._bot_race = map_params["b_race"]
+ self.shield_bits_ally = 1 if self._agent_race == "P" else 0
+ self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
+ # NOTE: The map_type (which is used to initialise the unit
+ # type ids), the unit_type_bits and the races are still properties
+ # of the map. This means even the 10gen_{race} maps are limited to the
+ # unit types statically defined in the unit type id assignment.
+ # Lifting this restriction shouldn't be too much work, I've just
+ # not done it.
+ self.unit_type_bits = map_params["unit_type_bits"]
+ self.map_type = map_params["map_type"]
+ self._unit_types = None
+
+ self.max_reward = (
+ self.n_enemies * self.reward_death_value + self.reward_win
+ )
+
+ # create lists containing the names of attributes returned in states
+ self.ally_state_attr_names = [
+ "health",
+ "energy/cooldown",
+ "rel_x",
+ "rel_y",
+ ]
+ self.enemy_state_attr_names = ["health", "rel_x", "rel_y"]
+
+ if self.shield_bits_ally > 0:
+ self.ally_state_attr_names += ["shield"]
+ if self.shield_bits_enemy > 0:
+ self.enemy_state_attr_names += ["shield"]
+ if self.conic_fov:
+ self.ally_state_attr_names += ["fov_x", "fov_y"]
+
+ self.capability_attr_names = []
+ if "attack" in self.capability_config:
+ self.capability_attr_names += ["attack_probability"]
+ if "health" in self.capability_config:
+ self.capability_attr_names += ["total_health"]
+ if self.unit_type_bits > 0:
+ bit_attr_names = [
+ "type_{}".format(bit) for bit in range(self.unit_type_bits)
+ ]
+ self.capability_attr_names += bit_attr_names
+ self.enemy_state_attr_names += bit_attr_names
+
+ self.agents = {}
+ self.enemies = {}
+ self.unit_name_to_id_map = {}
+ self.id_to_unit_name_map = {}
+ self._episode_count = 0
+ self._episode_steps = 0
+ self._total_steps = 0
+ self._obs = None
+ self.battles_won = 0
+ self.battles_game = 0
+ self.timeouts = 0
+ self.force_restarts = 0
+ self.last_stats = None
+ self.agent_attack_probabilities = np.zeros(self.n_agents)
+ self.agent_health_levels = np.zeros(self.n_agents)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
+ self.canonical_fov_directions = np.array(
+ [
+ (
+ np.cos(2 * np.pi * (i / self.n_fov_actions)),
+ np.sin(2 * np.pi * (i / self.n_fov_actions)),
+ )
+ for i in range(self.n_fov_actions)
+ ]
+ )
+ self.new_unit_positions = np.zeros((self.n_agents, 2))
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+ self.init_positions = np.zeros((self.n_agents, 2))
+ self._min_unit_type = 0
+ self.marine_id = self.marauder_id = self.medivac_id = 0
+ self.hydralisk_id = self.zergling_id = self.baneling_id = 0
+ self.stalker_id = self.colossus_id = self.zealot_id = 0
+ self.max_distance_x = 0
+ self.max_distance_y = 0
+ self.map_x = 0
+ self.map_y = 0
+ self.reward = 0
+ self.renderer = None
+ self.terrain_height = None
+ self.pathing_grid = None
+ self._run_config = None
+ self._sc2_proc = None
+ self._controller = None
+ # Try to avoid leaking SC2 processes on shutdown
+ atexit.register(lambda: self.close())
+
+ def _only_one_meta_marl_flag_on(self):
+ """Function that checks that either all the meta marl flags are off,
+ or at most one has been enabled."""
+ if self.stochastic_attack:
+ return not self.stochastic_health and not self.replace_teammates
+ else:
+ return not self.replace_teammates or not self.stochastic_health
+
+ def _launch(self):
+ """Launch the StarCraft II game."""
+ self._run_config = run_configs.get(version=self.game_version)
+ self.version = self._run_config.version
+ _map = maps.get(self.map_name)
+
+ # Setting up the interface
+ interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
+ self._sc2_proc = self._run_config.start(
+ window_size=self.window_size, want_rgb=False
+ )
+ self._controller = self._sc2_proc.controller
+
+ # Request to create the game
+ create = sc_pb.RequestCreateGame(
+ local_map=sc_pb.LocalMap(
+ map_path=_map.path,
+ map_data=self._run_config.map_data(_map.path),
+ ),
+ realtime=False,
+ random_seed=self._seed,
+ )
+ create.player_setup.add(type=sc_pb.Participant)
+ create.player_setup.add(
+ type=sc_pb.Computer,
+ race=races[self._bot_race],
+ difficulty=difficulties[self.difficulty],
+ )
+ self._controller.create_game(create)
+
+ join = sc_pb.RequestJoinGame(
+ race=races[self._agent_race], options=interface_options
+ )
+ self._controller.join_game(join)
+
+ game_info = self._controller.game_info()
+ map_info = game_info.start_raw
+ self.map_play_area_min = map_info.playable_area.p0
+ self.map_play_area_max = map_info.playable_area.p1
+ self.max_distance_x = (
+ self.map_play_area_max.x - self.map_play_area_min.x
+ )
+ self.max_distance_y = (
+ self.map_play_area_max.y - self.map_play_area_min.y
+ )
+ self.map_x = map_info.map_size.x
+ self.map_y = map_info.map_size.y
+
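+        # Decode the pathing grid: it is delivered either packed 1 bit per
+        # cell (unpack each byte MSB-first) or as one byte per cell.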
+ if map_info.pathing_grid.bits_per_pixel == 1:
+ vals = np.array(list(map_info.pathing_grid.data)).reshape(
+ self.map_x, int(self.map_y / 8)
+ )
+            self.pathing_grid = np.transpose(
+                np.array(
+                    [
+                        [(b >> i) & 1 for b in row for i in range(7, -1, -1)]
+                        for row in vals
+                    ],
+                    dtype=bool,
+                )
+            )
+ else:
+            self.pathing_grid = np.invert(
+                np.flip(
+                    np.transpose(
+                        np.array(
+                            list(map_info.pathing_grid.data), dtype=bool
+                        ).reshape(self.map_x, self.map_y)
+                    ),
+                    axis=1,
+                )
+            )
+
+ self.terrain_height = (
+ np.flip(
+ np.transpose(
+ np.array(list(map_info.terrain_height.data)).reshape(
+ self.map_x, self.map_y
+ )
+ ),
+ 1,
+ )
+ / 255
+ )
+
+ def reset(self, episode_config={}):
+ """Reset the environment. Required after each full episode.
+ Returns initial observations and states.
+ """
+ self._episode_steps = 0
+ self.episode_config = episode_config
+ if self._episode_count == 0:
+ # Launch StarCraft II
+ self._launch()
+ else:
+ self._restart()
+
+ # Information kept for counting the reward
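+        # episode_config maps capability names to {"item": value} entries
+        # (as produced by the distributions in the capability wrapper), e.g.
+        # {"attack": {"item": per-agent attack probabilities}, ...}.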
+ self.agent_attack_probabilities = episode_config.get("attack", {}).get(
+ "item", None
+ )
+ self.agent_health_levels = episode_config.get("health", {}).get(
+ "item", None
+ )
+ self.enemy_mask = episode_config.get("enemy_mask", {}).get(
+ "item", None
+ )
+ self.ally_start_positions = episode_config.get(
+ "ally_start_positions", {}
+ ).get("item", None)
+ self.enemy_start_positions = episode_config.get(
+ "enemy_start_positions", {}
+ ).get("item", None)
+ self.mask_enemies = self.enemy_mask is not None
+ team = episode_config.get("team_gen", {}).get("item", None)
+ self.death_tracker_ally = np.zeros(self.n_agents)
+ self.death_tracker_enemy = np.zeros(self.n_enemies)
+ self.fov_directions = np.zeros((self.n_agents, 2))
+ self.fov_directions[:, 0] = 1.0
+ self.previous_ally_units = None
+ self.previous_enemy_units = None
+ self.win_counted = False
+ self.defeat_counted = False
+ if self.debug:
+ logging.debug(
+ "Attack Probabilities: {}".format(self.agent_attack_probabilities)
+ )
+ logging.debug("Health Levels: {}".format(self.agent_health_levels))
+ self.last_action = np.zeros((self.n_agents, self.n_actions))
+
+ if self.heuristic_ai:
+ self.heuristic_targets = [None] * self.n_agents
+
+ try:
+ self._obs = self._controller.observe()
+ self.init_units(team, episode_config=episode_config)
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ if self.debug:
+ logging.debug(
+ "Started Episode {}".format(self._episode_count).center(
+ 60, "*"
+ )
+ )
+ return self.get_obs(), self.get_state()
+
+ def _restart(self):
+ """Restart the environment by killing all units on the map.
+ There is a trigger in the SC2Map file, which restarts the
+ episode when there are no units left.
+ """
+ try:
+ self._kill_all_units()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+
+ def full_restart(self):
+ """Full restart. Closes the SC2 process and launches a new one."""
+ self._sc2_proc.close()
+ self._launch()
+ self.force_restarts += 1
+
+ def _kill_units_below_health_level(self):
+ units_to_kill = []
+ for al_id, al_unit in self.agents.items():
+ if (
+ al_unit.health / al_unit.health_max
+ < self.agent_health_levels[al_id]
+ ) and not self.death_tracker_ally[al_id]:
+ units_to_kill.append(al_unit.tag)
+ self._kill_units(units_to_kill)
+
+ def step(self, actions):
+ """A single environment step. Returns reward, terminated, info."""
+ actions_int = [int(a) for a in actions]
+
+ self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
+
+ # Collect individual actions
+ sc_actions = []
+ if self.debug:
+ logging.debug("Actions".center(60, "-"))
+
+ for a_id, action in enumerate(actions_int):
+ if not self.heuristic_ai:
+ sc_action = self.get_agent_action(a_id, action)
+ else:
+ sc_action, action_num = self.get_agent_action_heuristic(
+ a_id, action
+ )
+ actions[a_id] = action_num
+ if sc_action:
+ sc_actions.append(sc_action)
+ # Send action request
+ req_actions = sc_pb.RequestAction(actions=sc_actions)
+
+ try:
+ if self.conic_fov:
+ self.render_fovs()
+ self._controller.actions(req_actions)
+ # Make step in SC2, i.e. apply actions
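+            # With stochastic health, split the step so that units whose
+            # proportional health has dropped below their level are killed
+            # before the remaining game-loop steps are taken.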
+ if not self.stochastic_health:
+ self._controller.step(self._step_mul)
+ else:
+ self._controller.step(
+ self._step_mul - self._kill_unit_step_mul
+ )
+ self._kill_units_below_health_level()
+ self._controller.step(self._kill_unit_step_mul)
+ # Observe here so that we know if the episode is over.
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ return 0, True, {}
+
+ self._total_steps += 1
+ self._episode_steps += 1
+
+ # Update units
+ game_end_code = self.update_units()
+
+ terminated = False
+ reward = self.reward_battle()
+ info = {"battle_won": False}
+
+ # count units that are still alive
+ dead_allies, dead_enemies = 0, 0
+ for _al_id, al_unit in self.agents.items():
+ if al_unit.health == 0:
+ dead_allies += 1
+ for _e_id, e_unit in self.enemies.items():
+ if e_unit.health == 0:
+ dead_enemies += 1
+
+ info["dead_allies"] = dead_allies
+ info["dead_enemies"] = dead_enemies
+
+ if game_end_code is not None:
+ # Battle is over
+ terminated = True
+ self.battles_game += 1
+ if game_end_code == 1 and not self.win_counted:
+ self.battles_won += 1
+ self.win_counted = True
+ info["battle_won"] = True
+ if not self.reward_sparse:
+ reward += self.reward_win
+ else:
+ reward = 1
+ elif game_end_code == -1 and not self.defeat_counted:
+ self.defeat_counted = True
+ if not self.reward_sparse:
+ reward += self.reward_defeat
+ else:
+ reward = -1
+
+ elif self._episode_steps >= self.episode_limit:
+ # Episode limit reached
+ terminated = True
+ if self.continuing_episode:
+ info["episode_limit"] = True
+ self.battles_game += 1
+ self.timeouts += 1
+
+ if self.debug:
+ logging.debug("Reward = {}".format(reward).center(60, "-"))
+
+ if terminated:
+ self._episode_count += 1
+
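+        # Scale rewards so that the maximum achievable episode reward
+        # equals reward_scale_rate.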
+ if self.reward_scale:
+ reward /= self.max_reward / self.reward_scale_rate
+
+ self.reward = reward
+
+ return reward, terminated, info
+
+ def get_agent_action(self, a_id, action):
+ """Construct the action for agent a_id."""
+ avail_actions = self.get_avail_agent_actions(a_id)
+ assert (
+ avail_actions[action] == 1
+ ), "Agent {} cannot perform action {}".format(a_id, action)
+
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+ x = unit.pos.x
+ y = unit.pos.y
+
+ if action == 0:
+ # no-op (valid only when dead)
+ assert unit.health == 0, "No-op only available for dead agents."
+ if self.debug:
+ logging.debug("Agent {}: Dead".format(a_id))
+ return None
+ elif action == 1:
+ # stop
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["stop"],
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ if self.debug:
+ logging.debug("Agent {}: Stop".format(a_id))
+
+ elif action == 2:
+ # move north
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y + self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y + self._move_amount]
+ )
+ if self.conic_fov:
+ self.fov_directions[a_id] = self.canonical_fov_directions[1]
+ if self.debug:
+ logging.debug("Agent {}: Move North".format(a_id))
+
+ elif action == 3:
+ # move south
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x, y=y - self._move_amount
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x, y - self._move_amount]
+ )
+ if self.conic_fov:
+ self.fov_directions[a_id] = self.canonical_fov_directions[3]
+ if self.debug:
+ logging.debug("Agent {}: Move South".format(a_id))
+
+ elif action == 4:
+ # move east
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x + self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x + self._move_amount, y]
+ )
+ if self.conic_fov:
+ self.fov_directions[a_id] = self.canonical_fov_directions[0]
+ if self.debug:
+ logging.debug("Agent {}: Move East".format(a_id))
+
+ elif action == 5:
+ # move west
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=sc_common.Point2D(
+ x=x - self._move_amount, y=y
+ ),
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ self.new_unit_positions[a_id] = np.array(
+ [x - self._move_amount, y]
+ )
+ if self.conic_fov:
+ self.fov_directions[a_id] = self.canonical_fov_directions[2]
+ if self.debug:
+ logging.debug("Agent {}: Move West".format(a_id))
+ else:
+ # attack/heal units that are in range
+ target_id = action - self.n_actions_no_attack
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and unit.unit_type == self.medivac_id
+ ):
+ target_unit = self.agents[target_id]
+ action_name = "heal"
+ else:
+ target_unit = self.enemies[target_id]
+ action_name = "attack"
+
+ if self.stochastic_attack:
+ p = np.random.default_rng().uniform()
+ if p > self.agent_attack_probabilities[a_id]:
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s {}, but fails".format(a_id, action_name, target_id)
+ )
+ return None
+ action_id = actions[action_name]
+ target_tag = target_unit.tag
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ if self.debug:
+ logging.debug(
+ "Agent {} {}s unit # {}".format(
+ a_id, action_name, target_id
+ )
+ )
+ if cmd:
+ sc_action = sc_pb.Action(
+ action_raw=r_pb.ActionRaw(unit_command=cmd)
+ )
+ return sc_action
+ return None
+
+ def get_agent_action_heuristic(self, a_id, action):
+ unit = self.get_unit_by_id(a_id)
+ tag = unit.tag
+
+ target = self.heuristic_targets[a_id]
+ if unit.unit_type == self.medivac_id:
+ if (
+ target is None
+ or self.agents[target].health == 0
+ or self.agents[target].health == self.agents[target].health_max
+ ):
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for al_id, al_unit in self.agents.items():
+ if al_unit.unit_type == self.medivac_id:
+ continue
+ if (
+ al_unit.health != 0
+ and al_unit.health != al_unit.health_max
+ ):
+ dist = self.distance(
+ unit.pos.x,
+ unit.pos.y,
+ al_unit.pos.x,
+ al_unit.pos.y,
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = al_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["heal"]
+ target_tag = self.agents[self.heuristic_targets[a_id]].tag
+ else:
+ if target is None or self.enemies[target].health == 0:
+ min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
+ min_id = -1
+ for e_id, e_unit in self.enemies.items():
+ if (
+ unit.unit_type == self.marauder_id
+ and e_unit.unit_type == self.medivac_id
+ ):
+ continue
+ if e_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, e_unit.pos.x, e_unit.pos.y
+ )
+ if dist < min_dist:
+ min_dist = dist
+ min_id = e_id
+ self.heuristic_targets[a_id] = min_id
+ if min_id == -1:
+ self.heuristic_targets[a_id] = None
+ return None, 0
+ action_id = actions["attack"]
+ target_tag = self.enemies[self.heuristic_targets[a_id]].tag
+
+ action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack
+
+ # Check if the action is available
+ if (
+ self.heuristic_rest
+ and self.get_avail_agent_actions(a_id)[action_num] == 0
+ ):
+
+ # Move towards the target rather than attacking/healing
+ if unit.unit_type == self.medivac_id:
+ target_unit = self.agents[self.heuristic_targets[a_id]]
+ else:
+ target_unit = self.enemies[self.heuristic_targets[a_id]]
+
+ delta_x = target_unit.pos.x - unit.pos.x
+ delta_y = target_unit.pos.y - unit.pos.y
+
+ if abs(delta_x) > abs(delta_y): # east or west
+ if delta_x > 0: # east
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x + self._move_amount, y=unit.pos.y
+ )
+ action_num = 4
+ else: # west
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x - self._move_amount, y=unit.pos.y
+ )
+ action_num = 5
+ else: # north or south
+ if delta_y > 0: # north
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y + self._move_amount
+ )
+ action_num = 2
+ else: # south
+ target_pos = sc_common.Point2D(
+ x=unit.pos.x, y=unit.pos.y - self._move_amount
+ )
+ action_num = 3
+
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=actions["move"],
+ target_world_space_pos=target_pos,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+ else:
+ # Attack/heal the target
+ cmd = r_pb.ActionRawUnitCommand(
+ ability_id=action_id,
+ target_unit_tag=target_tag,
+ unit_tags=[tag],
+ queue_command=False,
+ )
+
+ sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
+ return sc_action, action_num
+
+ def reward_battle(self):
+ """Reward function when self.reward_spare==False.
+ Returns accumulative hit/shield point damage dealt to the enemy
+ + reward_death_value per enemy unit killed, and, in case
+ self.reward_only_positive == False, - (damage dealt to ally units
+ + reward_death_value per ally unit killed) * self.reward_negative_scale
+ """
+ assert (
+ not self.stochastic_health or self.reward_only_positive
+ ), "Different Health Levels are currently only compatible with positive rewards"
+ if self.reward_sparse:
+ return 0
+
+ reward = 0
+ delta_deaths = 0
+ delta_ally = 0
+ delta_enemy = 0
+
+ neg_scale = self.reward_negative_scale
+
+ # update deaths
+ for al_id, al_unit in self.agents.items():
+ if not self.death_tracker_ally[al_id]:
+ # did not die so far
+ prev_health = (
+ self.previous_ally_units[al_id].health
+ + self.previous_ally_units[al_id].shield
+ )
+ if al_unit.health == 0:
+ # just died
+ self.death_tracker_ally[al_id] = 1
+ if not self.reward_only_positive:
+ delta_deaths -= self.reward_death_value * neg_scale
+ delta_ally += prev_health * neg_scale
+ else:
+ # still alive
+ delta_ally += neg_scale * (
+ prev_health - al_unit.health - al_unit.shield
+ )
+
+ for e_id, e_unit in self.enemies.items():
+ if not self.death_tracker_enemy[e_id]:
+ prev_health = (
+ self.previous_enemy_units[e_id].health
+ + self.previous_enemy_units[e_id].shield
+ )
+ if e_unit.health == 0:
+ self.death_tracker_enemy[e_id] = 1
+ delta_deaths += self.reward_death_value
+ delta_enemy += prev_health
+ else:
+ delta_enemy += prev_health - e_unit.health - e_unit.shield
+
+ if self.reward_only_positive:
+ reward = max(delta_enemy + delta_deaths, 0) # shield regeneration
+ else:
+ reward = delta_enemy + delta_deaths - delta_ally
+
+ return reward
+
+ def get_total_actions(self):
+ """Returns the total number of actions an agent could ever take."""
+ return self.n_actions
+
+ @staticmethod
+ def distance(x1, y1, x2, y2):
+ """Distance between two points."""
+ return math.hypot(x2 - x1, y2 - y1)
+
+ def unit_shoot_range(self, agent_id):
+ """Returns the shooting range for an agent."""
+ return 6
+
+ def unit_sight_range(self, agent_id):
+ """Returns the sight range for an agent."""
+ return 9
+
+ def unit_max_cooldown(self, unit):
+ """Returns the maximal cooldown for a unit."""
+ switcher = {
+ self.marine_id: 15,
+ self.marauder_id: 25,
+ self.medivac_id: 200, # max energy
+ self.stalker_id: 35,
+ self.zealot_id: 22,
+ self.colossus_id: 24,
+ self.hydralisk_id: 10,
+ self.zergling_id: 11,
+ self.baneling_id: 1,
+ }
+ return switcher.get(unit.unit_type, 15)
+
+ def save_replay(self):
+ """Save a replay."""
+ prefix = self.replay_prefix or self.map_name
+ replay_dir = self.replay_dir or ""
+ replay_path = self._run_config.save_replay(
+ self._controller.save_replay(),
+ replay_dir=replay_dir,
+ prefix=prefix,
+ )
+ logging.info("Replay saved at: %s" % replay_path)
+
+ def unit_max_shield(self, unit):
+ """Returns maximal shield for a given unit."""
+ if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
+ return 80 # Protoss's Stalker
+ elif unit.unit_type == 73 or unit.unit_type == self.zealot_id:
+ return 50 # Protoss's Zealot
+ elif unit.unit_type == 4 or unit.unit_type == self.colossus_id:
+ return 150 # Protoss's Colossus
+ else:
+ raise Exception("Maximum shield not recognised")
+
+ def can_move(self, unit, direction):
+ """Whether a unit can move in a given direction."""
+ m = self._move_amount / 2
+
+ if direction == Direction.NORTH:
+ x, y = int(unit.pos.x), int(unit.pos.y + m)
+ elif direction == Direction.SOUTH:
+ x, y = int(unit.pos.x), int(unit.pos.y - m)
+ elif direction == Direction.EAST:
+ x, y = int(unit.pos.x + m), int(unit.pos.y)
+ else:
+ x, y = int(unit.pos.x - m), int(unit.pos.y)
+
+ if self.check_bounds(x, y) and self.pathing_grid[x, y]:
+ return True
+
+ return False
+
+ def get_surrounding_points(self, unit, include_self=False):
+ """Returns the surrounding points of the unit in 8 directions."""
+ x = int(unit.pos.x)
+ y = int(unit.pos.y)
+
+ ma = self._move_amount
+
+ points = [
+ (x, y + 2 * ma),
+ (x, y - 2 * ma),
+ (x + 2 * ma, y),
+ (x - 2 * ma, y),
+ (x + ma, y + ma),
+ (x - ma, y - ma),
+ (x + ma, y - ma),
+ (x - ma, y + ma),
+ ]
+
+ if include_self:
+ points.append((x, y))
+
+ return points
+
+ def check_bounds(self, x, y):
+ """Whether a point is within the map bounds."""
+ return 0 <= x < self.map_x and 0 <= y < self.map_y
+
+ def get_surrounding_pathing(self, unit):
+ """Returns pathing values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=False)
+ vals = [
+ self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def get_surrounding_height(self, unit):
+ """Returns height values of the grid surrounding the given unit."""
+ points = self.get_surrounding_points(unit, include_self=True)
+ vals = [
+ self.terrain_height[x, y] if self.check_bounds(x, y) else 1
+ for x, y in points
+ ]
+ return vals
+
+ def _compute_health(self, agent_id, unit):
+ """Each agent has a health bar with max health
+ `health_max` and current health `health`. We set a level
+ `health_level` between `0` and `1` where the agent dies if its
+ proportional health (`health / health_max`) is below that level.
+ This function rescales health to take into account this death level.
+
+ In the proportional health scale we have something that looks like this:
+
+ -------------------------------------------------------------
+ 0 1
+ ^ health_level ^ proportional_health
+ And so we compute
+ (proportional_health - health_level) / (1 - health_level)
+ """
+ proportional_health = unit.health / unit.health_max
+ health_level = self.agent_health_levels[agent_id]
+ return (1.0 / (1 - health_level)) * (
+ proportional_health - health_level
+ )
+
+ def render_fovs(self):
+ lines_to_render = []
+ for agent_id in range(self.n_agents):
+ if not self.death_tracker_ally[agent_id]:
+ lines_to_render.extend(self.agent_cone(agent_id))
+ debug_command = d_pb.DebugCommand(
+ draw=d_pb.DebugDraw(lines=lines_to_render)
+ )
+ self._controller.debug(debug_command)
+
+ def agent_cone(self, agent_id):
+ fov_direction = self.fov_directions[agent_id]
+ c, s = np.cos(self.conic_fov_angle / 2), np.sin(
+ self.conic_fov_angle / 2
+ )
+ sight_range = self.unit_sight_range(agent_id)
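+        # Rotate the facing direction by +/- half the cone angle to obtain
+        # the two edges of the field-of-view cone drawn below.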
+ rot = np.array([[c, -s], [s, c]]) # Contra Rotate
+ neg_rot = np.array([[c, s], [-s, c]]) # Rotate Clockwise
+ start_pos = self.new_unit_positions[agent_id]
+ init_pos = sc_common.Point(
+ x=start_pos[0],
+ y=start_pos[1],
+ z=self.get_unit_by_id(agent_id).pos.z,
+ )
+ upper_cone_end = start_pos + (rot @ fov_direction) * sight_range
+ lower_cone_end = start_pos + (neg_rot @ fov_direction) * sight_range
+ lines = [
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=upper_cone_end[0],
+ y=upper_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ d_pb.DebugLine(
+ line=d_pb.Line(
+ p0=init_pos,
+ p1=sc_common.Point(
+ x=lower_cone_end[0],
+ y=lower_cone_end[1],
+ z=init_pos.z,
+ ),
+ )
+ ),
+ ]
+ return lines
+
+ def is_position_in_cone(self, agent_id, pos, range="sight_range"):
+ ally_pos = self.get_unit_by_id(agent_id).pos
+ distance = self.distance(ally_pos.x, ally_pos.y, pos.x, pos.y)
+ # position is in this agent's cone if it is not outside the sight
+ # range and has the correct angle
+ if range == "sight_range":
+ unit_range = self.unit_sight_range(agent_id)
+ elif range == "shoot_range":
+ unit_range = self.unit_shoot_range(agent_id)
+ else:
+ raise Exception("Range argument not recognised")
+ if distance > unit_range:
+ return False
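+        # Compare the angle of the target offset with the agent's facing
+        # angle; x components are clamped to +/-EPS to avoid division by zero
+        # in the arctan computation.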
+ x_diff = pos.x - ally_pos.x
+ x_diff = max(x_diff, EPS) if x_diff > 0 else min(x_diff, -EPS)
+ obj_angle = np.arctan((pos.y - ally_pos.y) / x_diff)
+ x = self.fov_directions[agent_id][0]
+        x = max(x, EPS) if x > 0 else min(x, -EPS)
+ fov_angle = np.arctan(self.fov_directions[agent_id][1] / x)
+ return np.abs(obj_angle - fov_angle) < self.conic_fov_angle / 2
+
+ def get_obs_agent(self, agent_id, fully_observable=False):
+ """Returns observation for agent_id. The observation is composed of:
+
+ - agent movement features (where it can move to, height information
+ and pathing grid)
+ - enemy features (available_to_attack, health, relative_x, relative_y,
+ shield, unit_type)
+ - ally features (visible, distance, relative_x, relative_y, shield,
+ unit_type)
+ - agent unit features (health, shield, unit_type)
+
+ All of this information is flattened and concatenated into a list,
+ in the aforementioned order. To know the sizes of each of the
+ features inside the final list of features, take a look at the
+ functions ``get_obs_move_feats_size()``,
+ ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and
+ ``get_obs_own_feats_size()``.
+
+ The size of the observation vector may vary, depending on the
+ environment configuration and type of units present in the map.
+ For instance, non-Protoss units will not have shields, movement
+ features may or may not include terrain height and pathing grid,
+ unit_type is not included if there is only one type of unit in the
+        map, etc.
+
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+
+        fully_observable: ignores the sight range for this unit.
+        For debugging purposes ONLY -- not a fair observation.
+ """
+ unit = self.get_unit_by_id(agent_id)
+
+ move_feats_dim = self.get_obs_move_feats_size()
+ enemy_feats_dim = self.get_obs_enemy_feats_size()
+ ally_feats_dim = self.get_obs_ally_feats_size()
+ own_feats_dim = self.get_obs_own_feats_size()
+
+ move_feats = np.zeros(move_feats_dim, dtype=np.float32)
+ enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)
+ ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)
+ own_feats = np.zeros(own_feats_dim, dtype=np.float32)
+
+ if (
+ unit.health > 0 and self.obs_starcraft
+ ): # otherwise dead, return all zeros
+ x = unit.pos.x
+ y = unit.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Movement features. Do not need similar for looking
+ # around because this is always possible
+ avail_actions = self.get_avail_agent_actions(agent_id)
+ for m in range(self.n_actions_move):
+ move_feats[m] = avail_actions[m + 2]
+
+ ind = self.n_actions_move
+
+ if self.obs_pathing_grid:
+ move_feats[
+ ind: ind + self.n_obs_pathing # noqa
+ ] = self.get_surrounding_pathing(unit)
+ ind += self.n_obs_pathing
+
+ if self.obs_terrain_height:
+ move_feats[ind:] = self.get_surrounding_height(unit)
+
+ # Enemy features
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+ enemy_visible = (
+ self.is_position_in_cone(agent_id, e_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (enemy_visible and e_unit.health > 0) or (
+ e_unit.health > 0 and fully_observable
+ ): # visible and alive
+ # Sight range > shoot range
+ enemy_feats[e_id, 0] = avail_actions[
+ self.n_actions_no_attack + e_id
+ ] # available
+ enemy_feats[e_id, 1] = dist / sight_range # distance
+ enemy_feats[e_id, 2] = (
+ e_x - x
+ ) / sight_range # relative X
+ enemy_feats[e_id, 3] = (
+ e_y - y
+ ) / sight_range # relative Y
+ show_enemy = (
+ self.mask_enemies
+ and not self.enemy_mask[agent_id][e_id]
+ ) or not self.mask_enemies
+ ind = 4
+ if self.obs_all_health and show_enemy:
+ enemy_feats[e_id, ind] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ ind += 1
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_feats[e_id, ind] = (
+ e_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.unit_type_bits > 0 and show_enemy:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_feats[e_id, ind + type_id] = 1 # unit type
+
+ # Ally features
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id != agent_id
+ ]
+ for i, al_id in enumerate(al_ids):
+
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+ ally_visible = (
+ self.is_position_in_cone(agent_id, al_unit.pos)
+ if self.conic_fov
+ else dist < sight_range
+ )
+ if (ally_visible and al_unit.health > 0) or (
+ al_unit.health > 0 and fully_observable
+ ): # visible and alive
+ ally_feats[i, 0] = 1 # visible
+ ally_feats[i, 1] = dist / sight_range # distance
+ ally_feats[i, 2] = (al_x - x) / sight_range # relative X
+ ally_feats[i, 3] = (al_y - y) / sight_range # relative Y
+
+ ind = 4
+ if self.obs_all_health:
+ if not self.stochastic_health:
+ ally_feats[i, ind] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ ind += 1
+ elif self.observe_teammate_health:
+ ally_feats[i, ind] = self._compute_health(
+ agent_id=al_id, unit=al_unit
+ )
+ ind += 1
+ elif self.zero_pad_health:
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_feats[i, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+ if self.stochastic_attack and self.observe_attack_probs:
+ ally_feats[i, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ elif (
+ self.stochastic_attack
+ and self.zero_pad_stochastic_attack
+ ):
+ ind += 1
+
+ if self.stochastic_health and self.observe_teammate_health:
+ ally_feats[i, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ elif self.stochastic_health and self.zero_pad_health:
+ ind += 1
+ if self.unit_type_bits > 0 and (
+ not self.replace_teammates
+ or self.observe_teammate_types
+ ):
+ type_id = self.get_unit_type_id(al_unit, True)
+ ally_feats[i, ind + type_id] = 1
+ ind += self.unit_type_bits
+ elif self.unit_type_bits > 0 and self.zero_pad_unit_types:
+ ind += self.unit_type_bits
+ if self.obs_last_action:
+ ally_feats[i, ind:] = self.last_action[al_id]
+
+ # Own features
+ ind = 0
+ if self.obs_own_health:
+ if not self.stochastic_health:
+ own_feats[ind] = unit.health / unit.health_max
+ else:
+ own_feats[ind] = self._compute_health(agent_id, unit)
+ ind += 1
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(unit)
+ own_feats[ind] = unit.shield / max_shield
+ ind += 1
+
+ if self.stochastic_attack:
+ own_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ own_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.obs_own_pos:
+ own_feats[ind] = x / self.map_x
+ own_feats[ind + 1] = y / self.map_y
+ ind += 2
+ if self.conic_fov:
+ own_feats[ind: ind + 2] = self.fov_directions[agent_id]
+ ind += 2
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ own_feats[ind + type_id] = 1
+ if self.obs_starcraft:
+ agent_obs = np.concatenate(
+ (
+ move_feats.flatten(),
+ enemy_feats.flatten(),
+ ally_feats.flatten(),
+ own_feats.flatten(),
+ )
+ )
+
+ if self.obs_timestep_number:
+ if self.obs_starcraft:
+ agent_obs = np.append(
+ agent_obs, self._episode_steps / self.episode_limit
+ )
+ else:
+ agent_obs = np.zeros(1, dtype=np.float32)
+ agent_obs[:] = self._episode_steps / self.episode_limit
+
+ if self.debug:
+ logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
+ logging.debug(
+ "Avail. actions {}".format(
+ self.get_avail_agent_actions(agent_id)
+ )
+ )
+ logging.debug("Move feats {}".format(move_feats))
+ logging.debug("Enemy feats {}".format(enemy_feats))
+ logging.debug("Ally feats {}".format(ally_feats))
+ logging.debug("Own feats {}".format(own_feats))
+
+ return agent_obs
+
+ def get_obs(self):
+ """Returns all agent observations in a list.
+ NOTE: Agents should have access only to their local observations
+ during decentralised execution.
+ """
+ agents_obs = [
+ self.get_obs_agent(i, fully_observable=self.fully_observable)
+ for i in range(self.n_agents)
+ ]
+ return agents_obs
+
+ def get_capabilities_agent(self, agent_id):
+ unit = self.get_unit_by_id(agent_id)
+ cap_feats = np.zeros(self.get_cap_size(), dtype=np.float32)
+
+ ind = 0
+ if self.stochastic_attack:
+ cap_feats[ind] = self.agent_attack_probabilities[agent_id]
+ ind += 1
+ if self.stochastic_health:
+ cap_feats[ind] = self.agent_health_levels[agent_id]
+ ind += 1
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(unit, True)
+ cap_feats[ind + type_id] = 1
+
+ return cap_feats
+
+ def get_capabilities(self):
+ """Returns all agent capabilities in a list."""
+ agents_cap = [
+ self.get_capabilities_agent(i) for i in range(self.n_agents)
+ ]
+ agents_cap = np.concatenate(agents_cap, axis=0).astype(np.float32)
+ return agents_cap
+
+ def get_state(self):
+ """Returns the global state.
+ NOTE: This function should not be used during decentralised execution.
+ """
+ if self.obs_instead_of_state:
+ obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
+ np.float32
+ )
+ return obs_concat
+
+ state_dict = self.get_state_dict()
+
+ state = np.append(
+ state_dict["allies"].flatten(), state_dict["enemies"].flatten()
+ )
+ if "last_action" in state_dict:
+ state = np.append(state, state_dict["last_action"].flatten())
+ if "timestep" in state_dict:
+ state = np.append(state, state_dict["timestep"])
+
+ state = state.astype(dtype=np.float32)
+
+ if self.debug:
+ logging.debug("STATE".center(60, "-"))
+ logging.debug("Ally state {}".format(state_dict["allies"]))
+ logging.debug("Enemy state {}".format(state_dict["enemies"]))
+ if self.state_last_action:
+ logging.debug("Last actions {}".format(self.last_action))
+
+ return state
+
+ def get_ally_num_attributes(self):
+ return len(self.ally_state_attr_names) + len(
+ self.capability_attr_names
+ )
+
+ def get_enemy_num_attributes(self):
+ return len(self.enemy_state_attr_names)
+
+ def get_state_dict(self):
+ """Returns the global state as a dictionary.
+
+ - allies: numpy array containing agents and their attributes
+ - enemies: numpy array containing enemies and their attributes
+ - last_action: numpy array of previous actions for each agent
+ - timestep: current no. of steps divided by total no. of steps
+
+ NOTE: This function should not be used during decentralised execution.
+ """
+
+ # number of features equals the number of attribute names
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ ally_state = np.zeros((self.n_agents, nf_al))
+ enemy_state = np.zeros((self.n_enemies, nf_en))
+
+ center_x = self.map_x / 2
+ center_y = self.map_y / 2
+
+ for al_id, al_unit in self.agents.items():
+ if al_unit.health > 0:
+ x = al_unit.pos.x
+ y = al_unit.pos.y
+ max_cd = self.unit_max_cooldown(al_unit)
+ if not self.stochastic_health:
+ ally_state[al_id, 0] = (
+ al_unit.health / al_unit.health_max
+ ) # health
+ else:
+ ally_state[al_id, 0] = self._compute_health(al_id, al_unit)
+ if (
+ self.map_type in ["MMM", "terran_gen"]
+ and al_unit.unit_type == self.medivac_id
+ ):
+ ally_state[al_id, 1] = al_unit.energy / max_cd # energy
+ else:
+ ally_state[al_id, 1] = (
+ al_unit.weapon_cooldown / max_cd
+ ) # cooldown
+ ally_state[al_id, 2] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ ally_state[al_id, 3] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ ind = 4
+ if self.shield_bits_ally > 0:
+ max_shield = self.unit_max_shield(al_unit)
+ ally_state[al_id, ind] = (
+ al_unit.shield / max_shield
+ ) # shield
+ ind += 1
+
+ if self.stochastic_attack:
+ ally_state[al_id, ind] = self.agent_attack_probabilities[
+ al_id
+ ]
+ ind += 1
+ if self.stochastic_health:
+ ally_state[al_id, ind] = self.agent_health_levels[al_id]
+ ind += 1
+ if self.conic_fov:
+ ally_state[al_id, ind: ind + 2] = self.fov_directions[
+ al_id
+ ]
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(al_unit, True)
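+                    # One-hot the unit type in the last unit_type_bits
+                    # columns of the row (negative index from the end).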
+ ally_state[al_id, type_id - self.unit_type_bits] = 1
+
+ for e_id, e_unit in self.enemies.items():
+ if e_unit.health > 0:
+ x = e_unit.pos.x
+ y = e_unit.pos.y
+
+ enemy_state[e_id, 0] = (
+ e_unit.health / e_unit.health_max
+ ) # health
+ enemy_state[e_id, 1] = (
+ x - center_x
+ ) / self.max_distance_x # relative X
+ enemy_state[e_id, 2] = (
+ y - center_y
+ ) / self.max_distance_y # relative Y
+
+ if self.shield_bits_enemy > 0:
+ max_shield = self.unit_max_shield(e_unit)
+ enemy_state[e_id, 3] = e_unit.shield / max_shield # shield
+
+ if self.unit_type_bits > 0:
+ type_id = self.get_unit_type_id(e_unit, False)
+ enemy_state[e_id, type_id - self.unit_type_bits] = 1
+
+ state = {"allies": ally_state, "enemies": enemy_state}
+
+ if self.state_last_action:
+ state["last_action"] = self.last_action
+ if self.state_timestep_number:
+ state["timestep"] = self._episode_steps / self.episode_limit
+
+ return state
+
+ def get_obs_enemy_feats_size(self):
+ """Returns the dimensions of the matrix containing enemy features.
+ Size is n_enemies x n_features.
+ """
+ nf_en = 4 + self.unit_type_bits
+
+ if self.obs_all_health:
+ nf_en += 1 + self.shield_bits_enemy
+
+ return self.n_enemies, nf_en
+
+ def get_obs_ally_feats_size(self):
+ """Returns the dimensions of the matrix containing ally features.
+ Size is n_allies x n_features.
+ """
+ nf_al = 4
+ nf_cap = self.get_obs_ally_capability_size()
+
+ if self.obs_all_health:
+ nf_al += 1 + self.shield_bits_ally
+
+ if self.obs_last_action:
+ nf_al += self.n_actions
+
+ return self.n_agents - 1, nf_al + nf_cap
+
+ def get_obs_own_feats_size(self):
+ """
+ Returns the size of the vector containing the agents' own features.
+ """
+ own_feats = self.get_cap_size()
+ if self.obs_own_health and self.obs_starcraft:
+ own_feats += 1 + self.shield_bits_ally
+ if self.conic_fov and self.obs_starcraft:
+ own_feats += 2
+ if self.obs_own_pos and self.obs_starcraft:
+ own_feats += 2
+ return own_feats
+
+ def get_obs_move_feats_size(self):
+ """Returns the size of the vector containing the agents's movement-
+ related features.
+ """
+ move_feats = self.n_actions_move
+ if self.obs_pathing_grid:
+ move_feats += self.n_obs_pathing
+ if self.obs_terrain_height:
+ move_feats += self.n_obs_height
+
+ return move_feats
+
+ def get_obs_ally_capability_size(self):
+ """Returns the size of capabilities observed by teammates."""
+ cap_feats = self.unit_type_bits
+ if self.stochastic_attack and (
+ self.zero_pad_stochastic_attack or self.observe_attack_probs
+ ):
+ cap_feats += 1
+ if self.stochastic_health and (
+ self.observe_teammate_health or self.zero_pad_health
+ ):
+ cap_feats += 1
+
+ return cap_feats
+
+ def get_cap_size(self):
+ """Returns the size of the own capabilities of the agent."""
+ cap_feats = 0
+ if self.stochastic_attack:
+ cap_feats += 1
+ if self.stochastic_health:
+ cap_feats += 1
+ if self.unit_type_bits > 0:
+ cap_feats += self.unit_type_bits
+
+ return cap_feats
+
+ def get_obs_size(self):
+ """Returns the size of the observation."""
+ own_feats = self.get_obs_own_feats_size()
+ move_feats = self.get_obs_move_feats_size()
+
+ n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size()
+ n_allies, n_ally_feats = self.get_obs_ally_feats_size()
+
+ enemy_feats = n_enemies * n_enemy_feats
+ ally_feats = n_allies * n_ally_feats
+ if self.obs_starcraft:
+ return (
+ self.obs_timestep_number
+ + move_feats
+ + enemy_feats
+ + ally_feats
+ + own_feats
+ )
+ else:
+ return 1 if self.obs_timestep_number else 0
+
+ def get_state_size(self):
+ """Returns the size of the global state."""
+ if self.obs_instead_of_state:
+ return self.get_obs_size() * self.n_agents
+
+ nf_al = self.get_ally_num_attributes()
+ nf_en = self.get_enemy_num_attributes()
+
+ enemy_state = self.n_enemies * nf_en
+ ally_state = self.n_agents * nf_al
+
+ size = enemy_state + ally_state
+
+ if self.state_last_action:
+ size += self.n_agents * self.n_actions
+ if self.state_timestep_number:
+ size += 1
+
+ return size
+
+ def get_visibility_matrix(self):
+ """Returns a boolean numpy array of dimensions
+ (n_agents, n_agents + n_enemies) indicating which units
+ are visible to each agent.
+ """
+        arr = np.zeros(
+            (self.n_agents, self.n_agents + self.n_enemies),
+            dtype=bool,
+        )
+
+ for agent_id in range(self.n_agents):
+ current_agent = self.get_unit_by_id(agent_id)
+            if current_agent.health > 0:  # if agent is not dead
+ x = current_agent.pos.x
+ y = current_agent.pos.y
+ sight_range = self.unit_sight_range(agent_id)
+
+ # Enemies
+ for e_id, e_unit in self.enemies.items():
+ e_x = e_unit.pos.x
+ e_y = e_unit.pos.y
+ dist = self.distance(x, y, e_x, e_y)
+
+ if dist < sight_range and e_unit.health > 0:
+ # visible and alive
+ arr[agent_id, self.n_agents + e_id] = 1
+
+ # The matrix for allies is filled symmetrically
+ al_ids = [
+ al_id for al_id in range(self.n_agents) if al_id > agent_id
+ ]
+ for _, al_id in enumerate(al_ids):
+ al_unit = self.get_unit_by_id(al_id)
+ al_x = al_unit.pos.x
+ al_y = al_unit.pos.y
+ dist = self.distance(x, y, al_x, al_y)
+
+ if dist < sight_range and al_unit.health > 0:
+ # visible and alive
+ arr[agent_id, al_id] = arr[al_id, agent_id] = 1
+
+ return arr
+
+ def get_unit_type_id(self, unit, ally):
+ """Returns the ID of unit type in the given scenario."""
+
+ if self.map_type == "protoss_gen":
+ if unit.unit_type in (self.stalker_id, Protoss.Stalker):
+ return 0
+ if unit.unit_type in (self.zealot_id, Protoss.Zealot):
+ return 1
+ if unit.unit_type in (self.colossus_id, Protoss.Colossus):
+ return 2
+ raise AttributeError()
+ if self.map_type == "terran_gen":
+ if unit.unit_type in (self.marine_id, Terran.Marine):
+ return 0
+ if unit.unit_type in (self.marauder_id, Terran.Marauder):
+ return 1
+ if unit.unit_type in (self.medivac_id, Terran.Medivac):
+ return 2
+ raise AttributeError()
+
+ if self.map_type == "zerg_gen":
+ if unit.unit_type in (self.zergling_id, Zerg.Zergling):
+ return 0
+ if unit.unit_type in (self.hydralisk_id, Zerg.Hydralisk):
+ return 1
+ if unit.unit_type in (self.baneling_id, Zerg.Baneling):
+ return 2
+ raise AttributeError()
+
+ # Old stuff
+ if ally: # use new SC2 unit types
+ type_id = unit.unit_type - self._min_unit_type
+
+ if self.map_type == "stalkers_and_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73
+ type_id = unit.unit_type - 73
+ elif self.map_type == "colossi_stalkers_zealots":
+ # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4
+ if unit.unit_type == 4:
+ type_id = 0
+ elif unit.unit_type == 74:
+ type_id = 1
+ else:
+ type_id = 2
+ elif self.map_type == "bane":
+ if unit.unit_type == 9:
+ type_id = 0
+ else:
+ type_id = 1
+ elif self.map_type == "MMM":
+ if unit.unit_type == 51:
+ type_id = 0
+ elif unit.unit_type == 48:
+ type_id = 1
+ else:
+ type_id = 2
+
+ return type_id
+
+ def get_avail_agent_actions(self, agent_id):
+ """Returns the available actions for agent_id."""
+ unit = self.get_unit_by_id(agent_id)
+ if unit.health > 0:
+ # cannot choose no-op when alive
+ avail_actions = [0] * self.n_actions
+
+ # stop should be allowed
+ avail_actions[1] = 1
+
+ # see if we can move
+ if self.can_move(unit, Direction.NORTH):
+ avail_actions[2] = 1
+ if self.can_move(unit, Direction.SOUTH):
+ avail_actions[3] = 1
+ if self.can_move(unit, Direction.EAST):
+ avail_actions[4] = 1
+ if self.can_move(unit, Direction.WEST):
+ avail_actions[5] = 1
+
+            # Can attack only units that are alive and within shooting range
+ shoot_range = self.unit_shoot_range(agent_id)
+
+ target_items = self.enemies.items()
+            if (
+                self.map_type in ["MMM", "terran_gen"]
+                and unit.unit_type == self.medivac_id
+            ):
+ # Medivacs cannot heal themselves or other flying units
+ target_items = [
+ (t_id, t_unit)
+ for (t_id, t_unit) in self.agents.items()
+ if t_unit.unit_type != self.medivac_id
+ ]
+ # should we only be able to target people in the cone?
+ for t_id, t_unit in target_items:
+ if t_unit.health > 0:
+ dist = self.distance(
+ unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
+ )
+ can_shoot = (
+ dist <= shoot_range
+ if not self.conic_fov
+ else self.is_position_in_cone(
+ agent_id, t_unit.pos, range="shoot_range"
+ )
+ )
+ if can_shoot:
+ avail_actions[t_id + self.n_actions_no_attack] = 1
+
+ return avail_actions
+
+ else:
+ # only no-op allowed
+ return [1] + [0] * (self.n_actions - 1)
+
+ def get_avail_actions(self):
+ """Returns the available actions of all agents in a list."""
+ avail_actions = []
+ for agent_id in range(self.n_agents):
+ avail_agent = self.get_avail_agent_actions(agent_id)
+ avail_actions.append(avail_agent)
+ return avail_actions
+
+ def close(self):
+ """Close StarCraft II."""
+ if self.renderer is not None:
+ self.renderer.close()
+ self.renderer = None
+ if self._sc2_proc:
+ self._sc2_proc.close()
+
+ def seed(self):
+ """Returns the random seed used by the environment."""
+ return self._seed
+
+ def render(self, mode="human"):
+ if self.renderer is None:
+ from smac.env.starcraft2.render import StarCraft2Renderer
+
+ self.renderer = StarCraft2Renderer(self, mode)
+ assert (
+ mode == self.renderer.mode
+ ), "mode must be consistent across render calls"
+ return self.renderer.render(mode)
+
+ def _kill_units(self, unit_tags):
+ debug_command = [
+ d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=unit_tags))
+ ]
+ self._controller.debug(debug_command)
+
+ def _kill_all_units(self):
+ """Kill all units on the map. Steps controller and so can throw
+ exceptions"""
+ units = [unit.tag for unit in self._obs.observation.raw_data.units]
+ self._kill_units(units)
+        # step until all the units have actually been removed
+ while len(self._obs.observation.raw_data.units) > 0:
+ self._controller.step(2)
+ self._obs = self._controller.observe()
+
+ def _create_new_team(self, team, episode_config):
+ # unit_names = {
+ # self.id_to_unit_name_map[unit.unit_type]
+ # for unit in self.agents.values()
+ # }
+ # It's important to set the number of agents and enemies
+ # because we use that to identify whether all the units have
+ # been created successfully
+
+ # TODO hardcoding init location. change this later for new maps
+ if not self.random_start:
+ ally_init_pos = [sc_common.Point2D(x=8, y=16)] * self.n_agents
+ # Spawning location of enemy units
+ enemy_init_pos = [sc_common.Point2D(x=24, y=16)] * self.n_enemies
+ else:
+ ally_init_pos = [
+ sc_common.Point2D(
+ x=self.ally_start_positions[i][0],
+ y=self.ally_start_positions[i][1],
+ )
+ for i in range(self.ally_start_positions.shape[0])
+ ]
+ enemy_init_pos = [
+ sc_common.Point2D(
+ x=self.enemy_start_positions[i][0],
+ y=self.enemy_start_positions[i][1],
+ )
+ for i in range(self.enemy_start_positions.shape[0])
+ ]
+ for unit_id, unit in enumerate(team):
+ unit_type_ally = self._convert_unit_name_to_unit_type(
+ unit, ally=True
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_ally,
+ owner=1,
+ pos=ally_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ unit_type_enemy = self._convert_unit_name_to_unit_type(
+ unit, ally=False
+ )
+ debug_command = [
+ d_pb.DebugCommand(
+ create_unit=d_pb.DebugCreateUnit(
+ unit_type=unit_type_enemy,
+ owner=2,
+ pos=enemy_init_pos[unit_id],
+ quantity=1,
+ )
+ )
+ ]
+ self._controller.debug(debug_command)
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def _convert_unit_name_to_unit_type(self, unit_name, ally=True):
+ if ally:
+ return self.ally_unit_map[unit_name]
+ else:
+ return self.enemy_unit_map[unit_name]
+
+ def init_units(self, team, episode_config={}):
+ """Initialise the units."""
+ if team:
+ # can use any value for min unit type because
+ # it is hardcoded based on the version
+ self._init_ally_unit_types(0)
+ self._create_new_team(team, episode_config)
+ while True:
+ # Sometimes not all units have yet been created by SC2
+ self.agents = {}
+ self.enemies = {}
+
+ ally_units = [
+ unit
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 1
+ ]
+ ally_units_sorted = sorted(
+ ally_units,
+ key=attrgetter("unit_type", "pos.x", "pos.y"),
+ reverse=False,
+ )
+
+ for i in range(len(ally_units_sorted)):
+ self.agents[i] = ally_units_sorted[i]
+ if self.debug:
+ logging.debug(
+ "Unit {} is {}, x = {}, y = {}".format(
+ len(self.agents),
+ self.agents[i].unit_type,
+ self.agents[i].pos.x,
+ self.agents[i].pos.y,
+ )
+ )
+
+ for unit in self._obs.observation.raw_data.units:
+ if unit.owner == 2:
+ self.enemies[len(self.enemies)] = unit
+ if self._episode_count == 0:
+ self.max_reward += unit.health_max + unit.shield_max
+
+ if self._episode_count == 0 and not team:
+ min_unit_type = min(
+ unit.unit_type for unit in self.agents.values()
+ )
+ self._init_ally_unit_types(min_unit_type)
+
+ all_agents_created = len(self.agents) == self.n_agents
+ all_enemies_created = len(self.enemies) == self.n_enemies
+
+ self._unit_types = [
+ unit.unit_type for unit in ally_units_sorted
+ ] + [
+ unit.unit_type
+ for unit in self._obs.observation.raw_data.units
+ if unit.owner == 2
+ ]
+
+ # TODO move this to the start
+ if all_agents_created and all_enemies_created: # all good
+ return
+
+ try:
+ self._controller.step(1)
+ self._obs = self._controller.observe()
+ except (protocol.ProtocolError, protocol.ConnectionError):
+ self.full_restart()
+ self.reset(episode_config=episode_config)
+
+ def get_unit_types(self):
+ if self._unit_types is None:
+ warn(
+ "unit types have not been initialized yet, please call"
+ "env.reset() to populate this and call t1286he method again."
+ )
+
+ return self._unit_types
+
+ def update_units(self):
+ """Update units after an environment step.
+ This function assumes that self._obs is up-to-date.
+ """
+ n_ally_alive = 0
+ n_enemy_alive = 0
+
+ # Store previous state
+ self.previous_ally_units = deepcopy(self.agents)
+ self.previous_enemy_units = deepcopy(self.enemies)
+
+ for al_id, al_unit in self.agents.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if al_unit.tag == unit.tag:
+ self.agents[al_id] = unit
+ updated = True
+ n_ally_alive += 1
+ break
+
+ if not updated: # dead
+ al_unit.health = 0
+
+ for e_id, e_unit in self.enemies.items():
+ updated = False
+ for unit in self._obs.observation.raw_data.units:
+ if e_unit.tag == unit.tag:
+ self.enemies[e_id] = unit
+ updated = True
+ n_enemy_alive += 1
+ break
+
+ if not updated: # dead
+ e_unit.health = 0
+
+ if (
+ n_ally_alive == 0
+ and n_enemy_alive > 0
+ or self.only_medivac_left(ally=True)
+ ):
+ return -1 # lost
+ if (
+ n_ally_alive > 0
+ and n_enemy_alive == 0
+ or self.only_medivac_left(ally=False)
+ ):
+ return 1 # won
+ if n_ally_alive == 0 and n_enemy_alive == 0:
+ return 0
+
+ return None
+
+ def _register_unit_mapping(self, unit_name, unit_type_id):
+ self.id_to_unit_name_map[unit_type_id] = unit_name
+ self.unit_name_to_id_map[unit_name] = unit_type_id
+
+ def _init_ally_unit_types(self, min_unit_type):
+ """Initialise ally unit types. Should be called once from the
+ init_units function.
+ """
+
+ self._min_unit_type = min_unit_type
+
+ if "10gen_" in self.map_name:
+ num_rl_units = 9
+ self._min_unit_type = (
+ len(self._controller.data().units) - num_rl_units
+ )
+
+ self.baneling_id = self._min_unit_type
+ self.colossus_id = self._min_unit_type + 1
+ self.hydralisk_id = self._min_unit_type + 2
+ self.marauder_id = self._min_unit_type + 3
+ self.marine_id = self._min_unit_type + 4
+ self.medivac_id = self._min_unit_type + 5
+ self.stalker_id = self._min_unit_type + 6
+ self.zealot_id = self._min_unit_type + 7
+ self.zergling_id = self._min_unit_type + 8
+
+ self.ally_unit_map = {
+ "baneling": self.baneling_id,
+ "colossus": self.colossus_id,
+ "hydralisk": self.hydralisk_id,
+ "marauder": self.marauder_id,
+ "marine": self.marine_id,
+ "medivac": self.medivac_id,
+ "stalker": self.stalker_id,
+ "zealot": self.zealot_id,
+ "zergling": self.zergling_id,
+ }
+ self.enemy_unit_map = {
+ "baneling": Zerg.Baneling,
+ "colossus": Protoss.Colossus,
+ "hydralisk": Zerg.Hydralisk,
+ "marauder": Terran.Marauder,
+ "marine": Terran.Marine,
+ "medivac": Terran.Medivac,
+ "stalker": Protoss.Stalker,
+ "zealot": Protoss.Zealot,
+ "zergling": Zerg.Zergling,
+ }
+
+ else:
+ if self.map_type == "marines":
+ self.marine_id = min_unit_type
+ self._register_unit_mapping("marine", min_unit_type)
+ elif self.map_type == "stalkers_and_zealots":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ self.zealot_id = min_unit_type + 1
+ self._register_unit_mapping("zealot", min_unit_type + 1)
+ elif self.map_type == "colossi_stalkers_zealots":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ self.stalker_id = min_unit_type + 1
+ self._register_unit_mapping("stalker", min_unit_type + 1)
+ self.zealot_id = min_unit_type + 2
+ self._register_unit_mapping("zealot", min_unit_type + 2)
+ elif self.map_type == "MMM":
+ self.marauder_id = min_unit_type
+ self._register_unit_mapping("marauder", min_unit_type)
+ self.marine_id = min_unit_type + 1
+ self._register_unit_mapping("marine", min_unit_type + 1)
+ self.medivac_id = min_unit_type + 2
+ self._register_unit_mapping("medivac", min_unit_type + 2)
+ elif self.map_type == "zealots":
+ self.zealot_id = min_unit_type
+ self._register_unit_mapping("zealot", min_unit_type)
+ elif self.map_type == "hydralisks":
+ self.hydralisk_id = min_unit_type
+ self._register_unit_mapping("hydralisk", min_unit_type)
+ elif self.map_type == "stalkers":
+ self.stalker_id = min_unit_type
+ self._register_unit_mapping("stalker", min_unit_type)
+ elif self.map_type == "colossus":
+ self.colossus_id = min_unit_type
+ self._register_unit_mapping("colossus", min_unit_type)
+ elif self.map_type == "bane":
+ self.baneling_id = min_unit_type
+ self._register_unit_mapping("baneling", min_unit_type)
+ self.zergling_id = min_unit_type + 1
+ self._register_unit_mapping("zergling", min_unit_type + 1)
+
+ def only_medivac_left(self, ally):
+ """Check if only Medivac units are left."""
+ if self.map_type != "MMM" and self.map_type != "terran_gen":
+ return False
+
+ if ally:
+ units_alive = [
+ a
+ for a in self.agents.values()
+ if (a.health > 0 and a.unit_type != self.medivac_id)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+ else:
+ units_alive = [
+ a
+ for a in self.enemies.values()
+ if (a.health > 0 and a.unit_type != Terran.Medivac)
+ ]
+ if len(units_alive) == 0:
+ return True
+ return False
+
+ def get_unit_by_id(self, a_id):
+ """Get unit by ID."""
+ return self.agents[a_id]
+
+ def get_stats(self):
+ stats = {
+ "battles_won": self.battles_won,
+ "battles_game": self.battles_game,
+ "battles_draw": self.timeouts,
+ "win_rate": self.battles_won / self.battles_game,
+ "timeouts": self.timeouts,
+ "restarts": self.force_restarts,
+ }
+ return stats
+
+ def get_env_info(self):
+ env_info = super().get_env_info()
+ env_info["agent_features"] = (
+ self.ally_state_attr_names + self.capability_attr_names
+ )
+ env_info["enemy_features"] = self.enemy_state_attr_names
+ return env_info
diff --git a/src/envs/smac_v2/official/wrapper.py b/src/envs/smac_v2/official/wrapper.py
new file mode 100644
index 0000000..cf8d72a
--- /dev/null
+++ b/src/envs/smac_v2/official/wrapper.py
@@ -0,0 +1,88 @@
+from .distributions import get_distribution
+from .starcraft2 import StarCraft2Env
+from .starcraft2_hxt import StarCraft2Env as StarCraft2EnvMoveWithFov
+from envs.multiagentenv import MultiAgentEnv
+
+
+class StarCraftCapabilityEnvWrapper(MultiAgentEnv):
+ def __init__(self, **kwargs):
+ self.distribution_config = kwargs["capability_config"]
+ self.env_key_to_distribution_map = {}
+ self._parse_distribution_config()
+        change_fov_with_move = kwargs.pop("change_fov_with_move")
+        self.env = (
+            StarCraft2EnvMoveWithFov(**kwargs)
+            if change_fov_with_move
+            else StarCraft2Env(**kwargs)
+        )
+ assert (
+ self.distribution_config.keys()
+ == kwargs["capability_config"].keys()
+ ), "Must give distribution config and capability config the same keys"
+
+ def _parse_distribution_config(self):
+ for env_key, config in self.distribution_config.items():
+ if env_key == "n_units":
+ continue
+ config["env_key"] = env_key
+ # add n_units key
+ config["n_units"] = self.distribution_config["n_units"]
+ distribution = get_distribution(config["dist_type"])(config)
+ self.env_key_to_distribution_map[env_key] = distribution
+
+ def reset(self):
+ reset_config = {}
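+        # Each distribution contributes entries of the form
+        # {env_key: {"item": ...}}, merged into one episode config.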
+ for distribution in self.env_key_to_distribution_map.values():
+ reset_config = {**reset_config, **distribution.generate()}
+
+ return self.env.reset(reset_config)
+
+ def __getattr__(self, name):
+ if hasattr(self.env, name):
+ return getattr(self.env, name)
+ else:
+ raise AttributeError
+
+ def get_obs(self):
+ return self.env.get_obs()
+
+ def get_state(self):
+ return self.env.get_state()
+
+ def get_avail_actions(self):
+ return self.env.get_avail_actions()
+
+ def get_env_info(self):
+ return self.env.get_env_info()
+
+ def get_obs_size(self):
+ return self.env.get_obs_size()
+
+ def get_state_size(self):
+ return self.env.get_state_size()
+
+ def get_total_actions(self):
+ return self.env.get_total_actions()
+
+ def get_capabilities(self):
+ return self.env.get_capabilities()
+
+ def get_obs_agent(self, agent_id):
+ return self.env.get_obs_agent(agent_id)
+
+ def get_avail_agent_actions(self, agent_id):
+ return self.env.get_avail_agent_actions(agent_id)
+
+ def render(self):
+ return self.env.render()
+
+ def step(self, actions):
+ return self.env.step(actions)
+
+ def get_stats(self):
+ return self.env.get_stats()
+
+ def full_restart(self):
+ return self.env.full_restart()
+
+ def save_replay(self):
+ self.env.save_replay()
+
+ def close(self):
+ return self.env.close()
diff --git a/src/learners/FeUdal_learner.py b/src/learners/FeUdal_learner.py
new file mode 100644
index 0000000..4db9028
--- /dev/null
+++ b/src/learners/FeUdal_learner.py
@@ -0,0 +1,257 @@
+import copy
+from components.episode_buffer import EpisodeBatch
+from modules.mixers.vdn import VDNMixer
+from modules.mixers.qmix import QMixer
+import torch
+from torch.optim import RMSprop
+import torch.nn.functional as F
+#from modules.critics.feudal_critic import FeudalCritic
+
+import numpy as np
+
+class FeudalLearner:
+ def __init__(self, mac, scheme, logger, args):
+ self.args = args
+ self.mac = mac
+ self.logger = logger
+
+ self.manager_params = list(mac.manager_parameters())
+ self.worker_params = list(mac.worker_parameters())
+
+
+ self.last_target_update_episode = 0
+
+ self.mixer = None
+ if args.mixer is not None:
+ if args.mixer == "vdn":
+ self.mixer = VDNMixer()
+ elif args.mixer == "qmix":
+ self.mixer = QMixer(args)
+ else:
+ raise ValueError("Mixer {} not recognised.".format(args.mixer))
+ self.worker_params += list(self.mixer.parameters())
+ self.target_mixer = copy.deepcopy(self.mixer)
+
+        # Separate optimisers for the manager's and the worker's parameters
+ self.manager_optimiser = RMSprop(params=self.manager_params, lr=self.args.lr, alpha=self.args.optim_alpha, eps=self.args.optim_eps)
+ self.worker_optimiser = RMSprop(params=self.worker_params, lr=self.args.lr, alpha=self.args.optim_alpha, eps=self.args.optim_eps)
+
+ # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
+ self.target_mac = copy.deepcopy(mac)
+
+        self.enable_parallel_computing = (not self.args.use_cuda) and getattr(
+            self.args, 'enable_parallel_computing', False
+        )
+ # self.enable_parallel_computing = False
+ if self.enable_parallel_computing:
+ from multiprocessing import Pool
+ # Multiprocessing pool for parallel computing.
+ self.pool = Pool(1)
+
+ self.log_stats_t = -self.args.learner_log_interval - 1
+
+ def train(self, batch: EpisodeBatch, t_env: int, episode_num: int, show_demo=False, save_data=None):
+ if self.args.use_cuda and str(self.mac.get_device()) == "cpu":
+ self.mac.cuda()
+
+ # Get the relevant quantities
+ rewards = batch["reward"][:, :-1]
+ actions = batch["actions"][:, :-1]
+ terminated = batch["terminated"][:, :-1].float()
+ mask = batch["filled"][:, :-1].float()
+ mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
+ avail_actions = batch["avail_actions"]
+
+ states = batch["obs"][:, :-1] # 倒數第二個時間步
+ next_states = batch["obs"][:, 1:] # 最後一個時間步
+
+ # Calculate estimated Q-Values
+ mac_out, goals_out, values_out = [], [], []
+ self.mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ agent_outs, goals, values = self.mac.forward(batch, t=t)
+ mac_out.append(agent_outs)
+ goals_out.append(goals)
+ values_out.append(values)
+ mac_out = torch.stack(mac_out, dim=1) # Concat over time
+ goals_out = torch.stack(goals_out, dim=1) # Concat over time
+ values_out = torch.stack(values_out, dim=1) # Concat over time
+
+
+ # Use goal_out to influence action selection or target calculation
+ # Example: Modify chosen_action_qvals calculation using goal_out
+ chosen_action_qvals = torch.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
+ # Modify chosen_action_qvals with goal_out if necessary
+ # chosen_action_qvals = some_function(chosen_action_qvals, goal_out)
+
+ x_mac_out = mac_out.clone().detach()
+ x_mac_out[avail_actions == 0] = -9999999
+ max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)
+
+ max_action_index = max_action_index.detach().unsqueeze(3)
+ is_max_action = (max_action_index == actions).int().float()
+
+ if show_demo:
+ q_i_data = chosen_action_qvals.detach().cpu().numpy()
+ q_data = (max_action_qvals - chosen_action_qvals).detach().cpu().numpy()
+
+ # Calculate the Q-Values necessary for the target
+ target_mac_out = []
+ self.target_mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ target_agent_outs, _, _ = self.target_mac.forward(batch, t=t)
+ target_mac_out.append(target_agent_outs)
+
+ # We don't need the first timesteps Q-Value estimate for calculating targets
+ target_mac_out = torch.stack(target_mac_out[1:], dim=1) # Concat across time
+
+ # Max over target Q-Values
+ if self.args.double_q:
+ # Get actions that maximise live Q (for double q-learning)
+ mac_out_detach = mac_out.clone().detach()
+ mac_out_detach[avail_actions == 0] = -9999999
+ cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
+ target_max_qvals = torch.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
+ else:
+ target_max_qvals = target_mac_out.max(dim=3)[0]
+
+ # Mix
+ if self.mixer is not None:
+ chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
+ target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
+
+ if show_demo:
+ tot_q_data = chosen_action_qvals.detach().cpu().numpy()
+ # NOTE: 'targets' is only defined further below (in the worker-loss section), so taking
+ # this show_demo branch as written would raise a NameError.
+ tot_target = targets.detach().cpu().numpy()
+ if self.mixer is None:
+ tot_q_data = np.mean(tot_q_data, axis=2)
+ tot_target = np.mean(tot_target, axis=2)
+
+ print('action_pair_%d_%d' % (save_data[0], save_data[1]), np.squeeze(q_data[:, 0]),
+ np.squeeze(q_i_data[:, 0]), np.squeeze(tot_q_data[:, 0]), np.squeeze(tot_target[:, 0]))
+ self.logger.log_stat('action_pair_%d_%d' % (save_data[0], save_data[1]),
+ np.squeeze(tot_q_data[:, 0]), t_env)
+ return
+
+ #----------------------------- Calculate worker loss -----------------------------------
+ # 1. Compute the intrinsic reward
+ c = self.args.c # horizon length c (number of steps the goal looks ahead)
+ states = batch["obs"][:, :-c] # s_t
+ future_states = batch["obs"][:, c:] # s_{t+c}
+ state_diff = future_states - states # state change over the c-step horizon
+ goals = goals_out[:, :-c] # goals emitted at the corresponding timesteps
+ # print("state_diff.shape: ", state_diff.shape)
+ # print("goals.shape: ", goals.shape)
+
+ # Intrinsic reward via cosine similarity between the state change and the goal
+ temp_state_diff = state_diff.reshape(-1, self.args.state_dim)
+ temp_goals = goals.reshape(-1, self.args.state_dim)
+ cos_sim = F.cosine_similarity(temp_state_diff, temp_goals, dim=-1)
+ cos_sim = cos_sim.view(state_diff.shape[0], state_diff.shape[1], -1)
+ intrinsic_rewards = (cos_sim / c).mean(dim=2, keepdim=True)
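+ # Note: in the original FeUdal Networks formulation the intrinsic reward averages over the
+ # last c transitions, r^I_t = (1/c) * sum_{i=1..c} cos(s_t - s_{t-i}, g_{t-i}); the version
+ # above uses a single c-step term cos(s_{t+c} - s_t, g_t) / c, averaged over agents.
+ # Shape caveat: rewards covers seq_len-1 steps while intrinsic_rewards covers seq_len-c,
+ # so the combination below only lines up when args.c == 1.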
+
+ # 2. Combine intrinsic and extrinsic rewards
+ combined_rewards = (1 - self.args.intrinsic_rewards_alpha) * rewards + \
+ self.args.intrinsic_rewards_alpha * intrinsic_rewards
+
+ # 3. Compute the TD error using the combined rewards
+ targets = combined_rewards + self.args.gamma * (1 - terminated) * target_max_qvals
+ td_error = (chosen_action_qvals - targets.detach())
+ mask = mask.expand_as(td_error)
+ # 0-out the targets that came from padded data
+ masked_td_error = td_error * mask
+ worker_loss = (masked_td_error ** 2).sum()
+
+ # 4. Optimise the worker
+ self.worker_optimiser.zero_grad()
+ worker_loss.backward(retain_graph=True)
+ grad_norm_worker = torch.nn.utils.clip_grad_norm_(self.worker_params, self.args.grad_norm_clip)
+ self.worker_optimiser.step()
+
+
+ masked_hit_prob = torch.mean(is_max_action, dim=2) * mask
+ hit_prob = masked_hit_prob.sum() / mask.sum()
+
+ #----------------------------- Calculate manager loss -----------------------------------
+ # 1. Compute state differences and goals
+ states = batch["obs"][:, :-c] # s_t
+ future_states = batch["obs"][:, c:] # s_{t+c}
+ state_diff = future_states - states
+ goals = goals_out[:, :-c]
+ values = values_out[:, :-c] # values predicted by the manager
+
+ # 2. Compute the advantage, separately for each agent
+ rewards_expanded = rewards.unsqueeze(2).expand(-1, -1, self.args.n_agents, -1).squeeze(-1) # [batch_size, seq_len-1, n_agents]
+
+ # values_out: [batch_size, seq_len-1, n_agents]
+ next_values = values_out[:, c:].squeeze(-1) # [batch_size, seq_len-c, n_agents]
+ current_values = values.squeeze(-1) # [batch_size, seq_len-c, n_agents]
+
+ # 3. Compute target values and advantages for each agent
+ target_values = rewards_expanded + (self.args.gamma ** c) * next_values # [batch_size, seq_len-c, n_agents]
+ advantages = rewards_expanded - current_values # [batch_size, seq_len-c, n_agents]
+
+ # 4. Compute the policy-gradient term
+ temp_state_diff = state_diff.reshape(-1, self.args.state_dim)
+ temp_goals = goals.reshape(-1, self.args.state_dim)
+ cos_sim = F.cosine_similarity(temp_state_diff, temp_goals, dim=-1)
+ cos_sim = cos_sim.view(state_diff.shape[0], state_diff.shape[1], -1) # [batch_size, seq_len-c, n_agents]
+
+ # 5. Manager loss
+ value_loss = F.mse_loss(current_values, target_values.detach())
+ manager_loss = -(advantages.detach() * cos_sim).sum()
+ total_manager_loss = manager_loss + self.args.vf_coef * value_loss
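+ # Note: this follows the transition policy gradient of FeUdal Networks,
+ # grad_theta = A_t^M * d/d_theta cos(s_{t+c} - s_t, g_t(theta)), with the advantage here
+ # approximated by (reward - V_t^M) instead of a discounted c-step return. As above,
+ # rewards_expanded spans seq_len-1 steps while the value/goal slices span seq_len-c,
+ # so these tensors only line up when args.c == 1.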
+
+ # 6. Optimise the manager
+ self.manager_optimiser.zero_grad()
+ total_manager_loss.backward()
+ grad_norm_manager = torch.nn.utils.clip_grad_norm_(self.manager_params, self.args.grad_norm_clip)
+ self.manager_optimiser.step()
+
+
+ if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
+ self._update_targets()
+ self.last_target_update_episode = episode_num
+
+ if t_env - self.log_stats_t >= self.args.learner_log_interval:
+ self.logger.log_stat("worker_loss", worker_loss.item(), t_env)
+ self.logger.log_stat("manager_loss", total_manager_loss.item(), t_env)
+ self.logger.log_stat("manager_cos_sim", cos_sim.mean().item(), t_env)
+ self.logger.log_stat("manager_advantage", advantages.mean().item(), t_env)
+ self.logger.log_stat("hit_prob", hit_prob.item(), t_env)
+ self.logger.log_stat("grad_norm_manager", grad_norm_manager, t_env)
+ self.logger.log_stat("grad_norm_worker", grad_norm_worker, t_env)
+ mask_elems = mask.sum().item()
+ self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
+ self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
+ self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
+ self.log_stats_t = t_env
+
+
+ def _update_targets(self):
+ self.target_mac.load_state(self.mac)
+ if self.mixer is not None:
+ self.target_mixer.load_state_dict(self.mixer.state_dict())
+ self.logger.console_logger.info("Updated target network")
+
+ def cuda(self):
+ self.mac.cuda()
+ self.target_mac.cuda()
+ if self.mixer is not None:
+ self.mixer.cuda()
+ self.target_mixer.cuda()
+
+ def save_models(self, path):
+ self.mac.save_models(path)
+ if self.mixer is not None:
+ torch.save(self.mixer.state_dict(), "{}/mixer.torch".format(path))
+ torch.save(self.worker_optimiser.state_dict(), "{}/worker_opt.torch".format(path))
+ torch.save(self.manager_optimiser.state_dict(), "{}/manager_opt.torch".format(path))
+
+ def load_models(self, path):
+ self.mac.load_models(path)
+ # Not quite right but I don't want to save target networks
+ self.target_mac.load_models(path)
+ if self.mixer is not None:
+ self.mixer.load_state_dict(torch.load("{}/mixer.torch".format(path), map_location=lambda storage, loc: storage))
+ self.worker_optimiser.load_state_dict(torch.load("{}/worker_opt.torch".format(path), map_location=lambda storage, loc: storage))
+ self.manager_optimiser.load_state_dict(torch.load("{}/manager_opt.torch".format(path), map_location=lambda storage, loc: storage))
diff --git a/src/learners/__init__.py b/src/learners/__init__.py
new file mode 100644
index 0000000..c9d885a
--- /dev/null
+++ b/src/learners/__init__.py
@@ -0,0 +1,10 @@
+from .dmaq_qatten_learner import DMAQ_qattenLearner
+from .nq_learner import NQLearner
+from .nq_learner_data_augmentation import NQLearnerDataAugmentation
+from .FeUdal_learner import FeudalLearner
+REGISTRY = {}
+
+REGISTRY["nq_learner"] = NQLearner
+REGISTRY["dmaq_qatten_learner"] = DMAQ_qattenLearner
+REGISTRY["q_learner_data_augmentation"] = NQLearnerDataAugmentation
+REGISTRY["feudal_learner"] = FeudalLearner
diff --git a/src/learners/dmaq_qatten_learner.py b/src/learners/dmaq_qatten_learner.py
new file mode 100644
index 0000000..701da37
--- /dev/null
+++ b/src/learners/dmaq_qatten_learner.py
@@ -0,0 +1,233 @@
+# From https://github.com/wjh720/QPLEX/, added here for convenience.
+import copy
+import time
+
+import torch as th
+from torch.optim import Adam
+
+from components.episode_buffer import EpisodeBatch
+from modules.mixers.dmaq_general import DMAQer
+from utils.rl_utils import build_td_lambda_targets
+from utils.th_utils import get_parameters_num
+
+
+class DMAQ_qattenLearner:
+ def __init__(self, mac, scheme, logger, args):
+ self.args = args
+ self.mac = mac
+ self.logger = logger
+
+ self.params = list(mac.parameters())
+
+ self.last_target_update_episode = 0
+
+ self.mixer = None
+ if args.mixer is not None:
+ if args.mixer == "dmaq":
+ self.mixer = DMAQer(args)
+ else:
+ raise ValueError("Mixer {} not recognised.".format(args.mixer))
+ self.params += list(self.mixer.parameters())
+ self.target_mixer = copy.deepcopy(self.mixer)
+
+ self.optimiser = Adam(params=self.params, lr=args.lr)
+
+ print('Mixer Size: ')
+ print(get_parameters_num(self.mixer.parameters()))
+
+ # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
+ self.target_mac = copy.deepcopy(mac)
+ self.log_stats_t = -self.args.learner_log_interval - 1
+ self.n_actions = self.args.n_actions
+ self.train_t = 0
+ self.avg_time = 0
+
+ def sub_train(self, batch: EpisodeBatch, t_env: int, episode_num: int, mac, mixer, optimiser, params,
+ save_data=None, split_data=False):
+ start_time = time.time()
+
+ # Get the relevant quantities
+ rewards = batch["reward"][:, :-1]
+ actions = batch["actions"][:, :-1]
+ terminated = batch["terminated"][:, :-1].float()
+ mask = batch["filled"][:, :-1].float()
+ mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
+ avail_actions = batch["avail_actions"]
+ actions_onehot = batch["actions_onehot"][:, :-1]
+
+ # Calculate estimated Q-Values
+ self.mac.set_train_mode()
+ mac_out = []
+ mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ agent_outs = mac.forward(batch, t=t)
+ mac_out.append(agent_outs)
+ mac_out = th.stack(mac_out, dim=1) # Concat over time
+
+ # Pick the Q-Values for the actions taken by each agent
+ chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
+
+ with th.no_grad():
+ mac_out_detach = mac_out.clone().detach()
+ mac_out_detach[avail_actions == 0] = -9999999
+ max_action_qvals, cur_max_actions = mac_out_detach.max(dim=3, keepdim=True) # [bs, traj, n_agent, 1]
+
+ # %%%%%%%%%%%%%%%%%%%%%%%%%% Calculate the Q-Values necessary for the target %%%%%%%%%%%%%%%%%%%%%%%%%%
+ # Set target mac to testing mode
+ self.target_mac.set_evaluation_mode()
+ target_mac_out = []
+ self.target_mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ target_agent_outs = self.target_mac.forward(batch, t=t)
+ target_mac_out.append(target_agent_outs)
+
+ # We don't need the first timesteps Q-Value estimate for calculating targets
+ target_mac_out = th.stack(target_mac_out, dim=1) # Concat across time
+
+ # Max over target Q-Values
+ assert self.args.double_q
+ # Get actions that maximise live Q (for double q-learning)
+ target_chosen_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
+
+ # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Mixer %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ # Set mixing net to training mode
+ mixer.train()
+ # %%%%%%%%%%%%%%%%%%%%%%% Current \sum_i{Q_i} %%%%%%%%%%%%%%%%%%%%%%%%
+ ans_chosen = mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
+ # %%%%%%%%%%%%%%%%%%%%%%% Current \sum_i{adv_i} %%%%%%%%%%%%%%%%%%%%%%%%
+ ans_adv = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
+ max_q_i=max_action_qvals[:, :-1].contiguous().squeeze(3), is_v=False)
+
+ chosen_action_qvals = ans_chosen + ans_adv
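+ # QPLEX-style duelling decomposition: the mixer is queried twice, once for the state-value
+ # part (is_v=True) and once for the advantage part conditioned on the taken actions and
+ # each agent's max Q (is_v=False); their sum is the joint Q_tot of the chosen actions.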
+
+ with th.no_grad():
+ self.target_mixer.eval()
+ # %%%%%%%%%%%%%%%%%%%%%%% Target \sum_i{Q_i} %%%%%%%%%%%%%%%%%%%%%%%%
+ target_chosen = self.target_mixer(target_chosen_qvals, batch["state"], is_v=True)
+
+ # %%%%%%%%%%%%%%%%%%%%%%% Target \sum_i{adv_i} %%%%%%%%%%%%%%%%%%%%%%%%
+ # Mask out unavailable actions
+ target_mac_out[avail_actions == 0] = -9999999
+ target_max_qvals = target_mac_out.max(dim=3)[0]
+ # Onehot target actions
+ cur_max_actions_onehot = th.zeros(cur_max_actions.squeeze(3).shape + (self.n_actions,),
+ device=cur_max_actions.device)
+ cur_max_actions_onehot = cur_max_actions_onehot.scatter_(3, cur_max_actions, 1)
+ target_adv = self.target_mixer(target_chosen_qvals, batch["state"],
+ actions=cur_max_actions_onehot, max_q_i=target_max_qvals, is_v=False)
+
+ target_max_qvals = target_chosen + target_adv
+
+ # Calculate 1-step Q-Learning targets
+ targets = build_td_lambda_targets(rewards, terminated, mask, target_max_qvals,
+ self.args.gamma, self.args.td_lambda)
+
+ # Td-error
+ td_error = (chosen_action_qvals - targets.detach())
+
+ mask = mask.expand_as(td_error)
+
+ # 0-out the targets that came from padded data
+ masked_td_error = td_error * mask
+
+ # Normal L2 loss, take mean over actual data
+ loss = 0.5 * (masked_td_error ** 2).sum() / mask.sum()
+
+ # Optimise
+ if not split_data:
+ optimiser.zero_grad()
+
+ loss.backward()
+
+ if not split_data:
+ grad_norm = th.nn.utils.clip_grad_norm_(params, self.args.grad_norm_clip)
+ optimiser.step()
+
+ self.train_t += 1
+ self.avg_time += (time.time() - start_time - self.avg_time) / self.train_t
+ print("Avg cost {} seconds".format(self.avg_time))
+
+ if not split_data and t_env - self.log_stats_t >= self.args.learner_log_interval:
+ with th.no_grad():
+ is_max_action = (cur_max_actions[:, :-1] == actions).int().float()
+ masked_hit_prob = th.mean(is_max_action, dim=2) * mask
+ hit_prob = masked_hit_prob.sum() / mask.sum()
+ self.logger.log_stat("loss", loss.item(), t_env)
+ self.logger.log_stat("hit_prob", hit_prob.item(), t_env)
+ self.logger.log_stat("grad_norm", grad_norm, t_env)
+ mask_elems = mask.sum().item()
+ self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env)
+ self.logger.log_stat("q_taken_mean",
+ (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents),
+ t_env)
+ self.logger.log_stat("target_mean", (targets * mask).sum().item() / (mask_elems * self.args.n_agents),
+ t_env)
+ self.log_stats_t = t_env
+
+ def train(self, batch: EpisodeBatch, t_env: int, episode_num: int, save_data=None):
+ if self.args.use_cuda and str(self.mac.get_device()) == "cpu":
+ self.mac.cuda()
+
+ if self.args.n_agents > 20:
+ split_num = 2
+ a, b, c, d = batch.split(split_num)
+
+ # Optimise
+ self.optimiser.zero_grad()
+
+ self.sub_train(a, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
+ save_data=save_data, split_data=True)
+ del a
+
+ self.sub_train(b, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
+ save_data=save_data, split_data=True)
+ del b
+
+ self.sub_train(c, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
+ save_data=save_data, split_data=True)
+ del c
+
+ self.sub_train(d, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
+ save_data=save_data, split_data=True)
+ del d
+
+ # Optimise
+ grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
+ self.optimiser.step()
+
+ else:
+ self.sub_train(batch, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
+ save_data=save_data, split_data=False)
+
+ if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
+ self._update_targets()
+ self.last_target_update_episode = episode_num
+
+ def _update_targets(self):
+ self.target_mac.load_state(self.mac)
+ if self.mixer is not None:
+ self.target_mixer.load_state_dict(self.mixer.state_dict())
+ self.logger.console_logger.info("Updated target network")
+
+ def cuda(self):
+ self.mac.cuda()
+ self.target_mac.cuda()
+ if self.mixer is not None:
+ self.mixer.cuda()
+ self.target_mixer.cuda()
+
+ def save_models(self, path):
+ self.mac.save_models(path)
+ if self.mixer is not None:
+ th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
+ th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
+
+ def load_models(self, path):
+ self.mac.load_models(path)
+ # Not quite right but I don't want to save target networks
+ self.target_mac.load_models(path)
+ if self.mixer is not None:
+ self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
+ self.target_mixer.load_state_dict(th.load("{}/mixer.th".format(path),
+ map_location=lambda storage, loc: storage))
+ self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
diff --git a/src/learners/nq_learner.py b/src/learners/nq_learner.py
new file mode 100644
index 0000000..3eb67b2
--- /dev/null
+++ b/src/learners/nq_learner.py
@@ -0,0 +1,234 @@
+import copy
+import time
+
+import torch as th
+from torch.optim import RMSprop, Adam
+
+from components.episode_buffer import EpisodeBatch
+from modules.mixers.nmix import Mixer
+from modules.mixers.qatten import QattenMixer
+from modules.mixers.vdn import VDNMixer
+from utils.rl_utils import build_td_lambda_targets, build_q_lambda_targets
+from utils.th_utils import get_parameters_num
+
+
+def calculate_target_q(target_mac, batch, enable_parallel_computing=False, thread_num=4):
+ if enable_parallel_computing:
+ th.set_num_threads(thread_num)
+ with th.no_grad():
+ # Set target mac to testing mode
+ target_mac.set_evaluation_mode()
+ target_mac_out = []
+ target_mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ target_agent_outs = target_mac.forward(batch, t=t)
+ target_mac_out.append(target_agent_outs)
+
+ # We don't need the first timesteps Q-Value estimate for calculating targets
+ target_mac_out = th.stack(target_mac_out, dim=1) # Concat across time
+ return target_mac_out
+
+
+def calculate_n_step_td_target(target_mixer, target_max_qvals, batch, rewards, terminated, mask, gamma, td_lambda,
+ enable_parallel_computing=False, thread_num=4, q_lambda=False, target_mac_out=None):
+ if enable_parallel_computing:
+ th.set_num_threads(thread_num)
+
+ with th.no_grad():
+ # Set target mixing net to testing mode
+ target_mixer.eval()
+ # Calculate n-step Q-Learning targets
+ target_max_qvals = target_mixer(target_max_qvals, batch["state"])
+
+ if q_lambda:
+ # q_lambda targets are not supported by this learner; the lines below are unreachable
+ # and kept only for reference.
+ raise NotImplementedError
+ qvals = th.gather(target_mac_out, 3, batch["actions"]).squeeze(3)
+ qvals = target_mixer(qvals, batch["state"])
+ targets = build_q_lambda_targets(rewards, terminated, mask, target_max_qvals, qvals, gamma, td_lambda)
+ else:
+ targets = build_td_lambda_targets(rewards, terminated, mask, target_max_qvals, gamma, td_lambda)
+ return targets.detach()
+
+
+class NQLearner:
+ def __init__(self, mac, scheme, logger, args):
+ self.args = args
+ self.mac = mac
+ self.logger = logger
+
+ self.last_target_update_episode = 0
+ self.device = th.device('cuda' if args.use_cuda else 'cpu')
+ self.params = list(mac.parameters())
+
+ if args.mixer == "qatten":
+ self.mixer = QattenMixer(args)
+ elif args.mixer == "vdn":
+ self.mixer = VDNMixer()
+ elif args.mixer == "qmix": # 31.521K
+ self.mixer = Mixer(args)
+ else:
+ raise "mixer error"
+
+ self.target_mixer = copy.deepcopy(self.mixer)
+ self.params += list(self.mixer.parameters())
+
+ # print('Mixer Size: ')
+ # print(get_parameters_num(self.mixer.parameters()))
+ self.logger.console_logger.info('Mixer Size: {}'.format(get_parameters_num(self.mixer.parameters())))
+
+ if self.args.optimizer == 'adam':
+ self.optimiser = Adam(params=self.params, lr=args.lr, weight_decay=getattr(args, "weight_decay", 0))
+ else:
+ self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
+
+ # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
+ self.target_mac = copy.deepcopy(mac)
+ self.log_stats_t = -self.args.learner_log_interval - 1
+ self.train_t = 0
+ self.avg_time = 0
+
+ self.enable_parallel_computing = (not self.args.use_cuda) and getattr(self.args, 'enable_parallel_computing',
+ False)
+ # self.enable_parallel_computing = False
+ if self.enable_parallel_computing:
+ from multiprocessing import Pool
+ # Multiprocessing pool for parallel computing.
+ self.pool = Pool(1)
+
+ def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
+ start_time = time.time()
+ if self.args.use_cuda and str(self.mac.get_device()) == "cpu":
+ self.mac.cuda()
+
+ # Get the relevant quantities
+ rewards = batch["reward"][:, :-1]
+ actions = batch["actions"][:, :-1]
+ terminated = batch["terminated"][:, :-1].float()
+ mask = batch["filled"][:, :-1].float()
+ mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
+ avail_actions = batch["avail_actions"]
+
+ if self.enable_parallel_computing:
+ target_mac_out = self.pool.apply_async(
+ calculate_target_q,
+ (self.target_mac, batch, True, self.args.thread_num)
+ )
+
+ # Calculate estimated Q-Values
+ self.mac.set_train_mode()
+ mac_out = []
+ self.mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ agent_outs = self.mac.forward(batch, t=t)
+ mac_out.append(agent_outs)
+ mac_out = th.stack(mac_out, dim=1) # Concat over time
+ # TODO: double DQN action, COMMENT: do not need copy
+ mac_out[avail_actions == 0] = -9999999
+ # Pick the Q-Values for the actions taken by each agent
+ chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
+
+ # Calculate the Q-Values necessary for the target
+ with th.no_grad():
+ if self.enable_parallel_computing:
+ target_mac_out = target_mac_out.get()
+ else:
+ target_mac_out = calculate_target_q(self.target_mac, batch)
+
+ # Max over target Q-Values/ Double q learning
+ # mac_out_detach = mac_out.clone().detach()
+ # TODO: COMMENT: do not need copy
+ mac_out_detach = mac_out
+ # mac_out_detach[avail_actions == 0] = -9999999
+ cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]
+
+ target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
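+ # Double Q-learning: the online network (with unavailable actions masked above) selects
+ # a* = argmax_a Q_online(s', a) and the target network evaluates Q_target(s', a*); these
+ # values are then mixed and turned into TD(lambda) targets below, curbing over-estimation.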
+
+ assert getattr(self.args, 'q_lambda', False) == False
+ if self.args.mixer.find("qmix") != -1 and self.enable_parallel_computing:
+ targets = self.pool.apply_async(
+ calculate_n_step_td_target,
+ (self.target_mixer, target_max_qvals, batch, rewards, terminated, mask, self.args.gamma,
+ self.args.td_lambda, True, self.args.thread_num, False, None)
+ )
+ else:
+ targets = calculate_n_step_td_target(
+ self.target_mixer, target_max_qvals, batch, rewards, terminated, mask, self.args.gamma,
+ self.args.td_lambda
+ )
+
+ # Set mixing net to training mode
+ self.mixer.train()
+ # Mixer
+ chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
+
+ if self.args.mixer.find("qmix") != -1 and self.enable_parallel_computing:
+ targets = targets.get()
+
+ td_error = (chosen_action_qvals - targets)
+ td_error2 = 0.5 * td_error.pow(2)
+
+ mask = mask.expand_as(td_error2)
+ masked_td_error = td_error2 * mask
+
+ mask_elems = mask.sum()
+ loss = masked_td_error.sum() / mask_elems
+
+ # Optimise
+ self.optimiser.zero_grad()
+ loss.backward()
+ grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
+ self.optimiser.step()
+
+ self.train_t += 1
+ self.avg_time += (time.time() - start_time - self.avg_time) / self.train_t
+ #print("Avg cost {} seconds".format(self.avg_time))
+ self.logger.console_logger.info("Avg cost {} seconds".format(self.avg_time))
+
+ if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
+ self._update_targets()
+ self.last_target_update_episode = episode_num
+
+ if t_env - self.log_stats_t >= self.args.learner_log_interval:
+ # For log
+ with th.no_grad():
+ mask_elems = mask_elems.item()
+ td_error_abs = masked_td_error.abs().sum().item() / mask_elems
+ q_taken_mean = (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents)
+ target_mean = (targets * mask).sum().item() / (mask_elems * self.args.n_agents)
+ self.logger.log_stat("loss_td", loss.item(), t_env)
+ self.logger.log_stat("grad_norm", grad_norm, t_env)
+ self.logger.log_stat("td_error_abs", td_error_abs, t_env)
+ self.logger.log_stat("q_taken_mean", q_taken_mean, t_env)
+ self.logger.log_stat("target_mean", target_mean, t_env)
+ self.log_stats_t = t_env
+
+ def _update_targets(self):
+ self.target_mac.load_state(self.mac)
+ if self.mixer is not None:
+ self.target_mixer.load_state_dict(self.mixer.state_dict())
+ self.logger.console_logger.info("Updated target network")
+
+ def cuda(self):
+ self.mac.cuda()
+ self.target_mac.cuda()
+ if self.mixer is not None:
+ self.mixer.cuda()
+ self.target_mixer.cuda()
+
+ def save_models(self, path):
+ self.mac.save_models(path)
+ if self.mixer is not None:
+ th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
+ th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
+
+ def load_models(self, path):
+ self.mac.load_models(path)
+ # Not quite right but I don't want to save target networks
+ self.target_mac.load_models(path)
+ if self.mixer is not None:
+ self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
+ self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
+
+ def __del__(self):
+ if self.enable_parallel_computing:
+ self.pool.close()
diff --git a/src/learners/nq_learner_data_augmentation.py b/src/learners/nq_learner_data_augmentation.py
new file mode 100644
index 0000000..c52640b
--- /dev/null
+++ b/src/learners/nq_learner_data_augmentation.py
@@ -0,0 +1,243 @@
+import copy
+import time
+
+import torch as th
+from multiprocessing import Pool
+from torch.optim import RMSprop, Adam
+
+from components.episode_buffer import EpisodeBatch
+from modules.mixers.nmix import Mixer
+from modules.mixers.qatten import QattenMixer
+from modules.mixers.vdn import VDNMixer
+from utils.rl_utils import build_td_lambda_targets, build_q_lambda_targets
+from utils.th_utils import get_parameters_num
+from utils.data_processing import do_data_augmentation
+
+
+def calculate_target_q(target_mac, batch, enable_parallel_computing=False, thread_num=4):
+ if enable_parallel_computing:
+ th.set_num_threads(thread_num)
+ with th.no_grad():
+ # Set target mac to testing mode
+ # target_mac.set_evaluation_mode()
+ target_mac_out = []
+ target_mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ target_agent_outs = target_mac.forward(batch, t=t)
+ target_mac_out.append(target_agent_outs)
+
+ # We don't need the first timesteps Q-Value estimate for calculating targets
+ target_mac_out = th.stack(target_mac_out, dim=1) # Concat across time
+ return target_mac_out
+
+
+def calculate_n_step_td_target(target_mixer, target_max_qvals, batch, rewards, terminated, mask, gamma, td_lambda,
+ enable_parallel_computing=False, thread_num=4, q_lambda=False, target_mac_out=None):
+ if enable_parallel_computing:
+ th.set_num_threads(thread_num)
+
+ with th.no_grad():
+ # Set target mixing net to testing mode
+ target_mixer.eval()
+ # Calculate n-step Q-Learning targets
+ target_max_qvals = target_mixer(target_max_qvals, batch["state"])
+
+ if q_lambda:
+ qvals = th.gather(target_mac_out, 3, batch["actions"]).squeeze(3)
+ qvals = target_mixer(qvals, batch["state"])
+ targets = build_q_lambda_targets(rewards, terminated, mask, target_max_qvals, qvals, gamma, td_lambda)
+ else:
+ targets = build_td_lambda_targets(rewards, terminated, mask, target_max_qvals, gamma, td_lambda)
+ return targets.detach()
+
+
+class NQLearnerDataAugmentation:
+ def __init__(self, mac, scheme, logger, args):
+ self.args = args
+ self.mac = mac
+ self.logger = logger
+
+ self.last_target_update_episode = 0
+ self.device = th.device('cuda' if args.use_cuda else 'cpu')
+ self.params = list(mac.parameters())
+
+ if args.mixer == "qatten":
+ self.mixer = QattenMixer(args)
+ elif args.mixer == "vdn":
+ self.mixer = VDNMixer()
+ elif args.mixer == "qmix": # 31.521K
+ self.mixer = Mixer(args)
+ else:
+ raise "mixer error"
+
+ self.target_mixer = copy.deepcopy(self.mixer)
+ self.params += list(self.mixer.parameters())
+
+ print('Mixer Size: ')
+ print(get_parameters_num(self.mixer.parameters()))
+
+ if self.args.optimizer == 'adam':
+ self.optimiser = Adam(params=self.params, lr=args.lr, weight_decay=getattr(args, "weight_decay", 0))
+ else:
+ self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
+
+ # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
+ self.target_mac = copy.deepcopy(mac)
+ self.log_stats_t = -self.args.learner_log_interval - 1
+ self.train_t = 0
+ self.avg_time = 0
+
+ self.enable_parallel_computing = (not self.args.use_cuda) and getattr(self.args, 'enable_parallel_computing',
+ True)
+ # self.enable_parallel_computing = False
+ if self.enable_parallel_computing:
+ # Multiprocessing pool for parallel computing.
+ # ctx = th.multiprocessing.get_context("spawn")
+ # self.pool = ctx.Pool()
+ self.pool = Pool(1)
+
+ def train_each_batch(self, batch):
+ # Get the relevant quantities
+ rewards = batch["reward"][:, :-1]
+ actions = batch["actions"][:, :-1]
+ terminated = batch["terminated"][:, :-1].float()
+ mask = batch["filled"][:, :-1].float()
+ mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
+ avail_actions = batch["avail_actions"]
+
+ if self.enable_parallel_computing:
+ target_mac_out = self.pool.apply_async(
+ calculate_target_q,
+ (self.target_mac, batch, True, self.args.thread_num)
+ )
+
+ # Calculate estimated Q-Values
+ self.mac.set_train_mode()
+ mac_out = []
+ self.mac.init_hidden(batch.batch_size)
+ for t in range(batch.max_seq_length):
+ agent_outs = self.mac.forward(batch, t=t)
+ mac_out.append(agent_outs)
+ mac_out = th.stack(mac_out, dim=1) # Concat over time
+
+ # Pick the Q-Values for the actions taken by each agent
+ chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
+
+ # Calculate the Q-Values necessary for the target
+ with th.no_grad():
+ if self.enable_parallel_computing:
+ target_mac_out = target_mac_out.get()
+ else:
+ target_mac_out = calculate_target_q(self.target_mac, batch)
+
+ # Max over target Q-Values/ Double q learning
+ mac_out_detach = mac_out.clone().detach()
+ mac_out_detach[avail_actions == 0] = -9999999
+ cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]
+ target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
+
+ assert getattr(self.args, 'q_lambda', False) == False
+ if self.args.mixer.find("qmix") != -1 and self.enable_parallel_computing:
+ targets = self.pool.apply_async(
+ calculate_n_step_td_target,
+ (self.target_mixer, target_max_qvals, batch, rewards, terminated, mask, self.args.gamma,
+ self.args.td_lambda, True, self.args.thread_num, False, None)
+ )
+ else:
+ targets = calculate_n_step_td_target(
+ self.target_mixer, target_max_qvals, batch, rewards, terminated, mask, self.args.gamma,
+ self.args.td_lambda
+ )
+
+ # Set mixing net to training mode
+ self.mixer.train()
+ # Mixer
+ chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
+
+ if self.args.mixer.find("qmix") != -1 and self.enable_parallel_computing:
+ targets = targets.get()
+
+ td_error = (chosen_action_qvals - targets)
+ td_error2 = 0.5 * td_error.pow(2)
+
+ mask = mask.expand_as(td_error2)
+ masked_td_error = td_error2 * mask
+
+ mask_elems = mask.sum()
+ loss = masked_td_error.sum() / mask_elems
+
+ # Optimise
+ self.optimiser.zero_grad()
+ loss.backward()
+ # Optimise
+ grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
+ self.optimiser.step()
+
+ # For log
+ with th.no_grad():
+ td_error_abs = (masked_td_error.abs().sum() / mask_elems).item()
+ q_taken_mean = ((chosen_action_qvals * mask).sum() / (mask_elems * self.args.n_agents)).item()
+ target_mean = ((targets * mask).sum() / (mask_elems * self.args.n_agents)).item()
+ return loss, td_error_abs, q_taken_mean, target_mean, grad_norm
+
+ def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
+ start_time = time.time()
+ if self.args.use_cuda and str(self.mac.get_device()) == "cpu":
+ self.mac.cuda()
+
+ # Do data augmentation here.
+ for data_augmentation_time in range(self.args.augment_times):
+ new_batch = do_data_augmentation(self.args, batch, augment_times=1)
+ # print(new_batch.batch_size)
+ loss, td_error_abs, q_taken_mean, target_mean, grad_norm = self.train_each_batch(new_batch)
+ del new_batch
+
+ loss, td_error_abs, q_taken_mean, target_mean, grad_norm = self.train_each_batch(batch)
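+ # Note: each augmented batch above takes its own optimiser step, followed by one final step
+ # on the original batch, so a single train() call performs (augment_times + 1) gradient
+ # updates, and only the statistics of the last (non-augmented) update are logged below.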
+
+ self.train_t += 1
+ self.avg_time += (time.time() - start_time - self.avg_time) / self.train_t
+ print("Avg cost {} seconds".format(self.avg_time))
+
+ if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
+ self._update_targets()
+ self.last_target_update_episode = episode_num
+
+ if t_env - self.log_stats_t >= self.args.learner_log_interval:
+ self.logger.log_stat("loss_td", loss, t_env)
+ self.logger.log_stat("grad_norm", grad_norm, t_env)
+
+ self.logger.log_stat("td_error_abs", td_error_abs, t_env)
+ self.logger.log_stat("q_taken_mean", q_taken_mean, t_env)
+ self.logger.log_stat("target_mean", target_mean, t_env)
+ self.log_stats_t = t_env
+
+ def _update_targets(self):
+ self.target_mac.load_state(self.mac)
+ if self.mixer is not None:
+ self.target_mixer.load_state_dict(self.mixer.state_dict())
+ self.logger.console_logger.info("Updated target network")
+
+ def cuda(self):
+ self.mac.cuda()
+ self.target_mac.cuda()
+ if self.mixer is not None:
+ self.mixer.cuda()
+ self.target_mixer.cuda()
+
+ def save_models(self, path):
+ self.mac.save_models(path)
+ if self.mixer is not None:
+ th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
+ th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
+
+ def load_models(self, path):
+ self.mac.load_models(path)
+ # Not quite right but I don't want to save target networks
+ self.target_mac.load_models(path)
+ if self.mixer is not None:
+ self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
+ self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
+
+ def __del__(self):
+ if self.enable_parallel_computing:
+ self.pool.close()
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000..199f3a0
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,124 @@
+import random
+
+import numpy as np
+import os
+import collections
+from os.path import dirname, abspath, join
+from copy import deepcopy
+from sacred import Experiment, SETTINGS
+from sacred.observers import FileStorageObserver
+from sacred.utils import apply_backspaces_and_linefeeds
+import sys
+import torch as th
+from utils.logging import get_logger
+import yaml
+import collections.abc
+
+from run import REGISTRY as run_REGISTRY
+
+SETTINGS['CAPTURE_MODE'] = "no" # set to "no" if you want to see stdout/stderr in console "fd" or "no"
+logger = get_logger()
+
+ex = Experiment("pymarl")
+ex.logger = logger
+ex.captured_out_filter = apply_backspaces_and_linefeeds
+
+results_path = dirname(dirname(abspath(__file__)))
+
+
+@ex.main
+def my_main(_run, _config, _log):
+ # Setting the random seed throughout the modules
+ config = config_copy(_config)
+ random.seed(config["seed"])
+ np.random.seed(config["seed"])
+ th.manual_seed(config["seed"])
+ th.cuda.manual_seed(config["seed"])
+ # th.cuda.manual_seed_all(config["seed"])
+ th.backends.cudnn.deterministic = True # cudnn
+
+
+ config['env_args']['seed'] = config["seed"]
+
+ # run
+ run_REGISTRY[_config['run']](_run, config, _log)
+
+
+def _get_config(params, arg_name, subfolder):
+ config_name = None
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0] == arg_name:
+ config_name = _v.split("=")[1]
+ del params[_i]
+ break
+
+ if config_name is not None:
+ with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)),
+ "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "{}.yaml error: {}".format(config_name, exc)
+ return config_dict
+
+
+def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, collections.abc.Mapping):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+
+def config_copy(config):
+ if isinstance(config, dict):
+ return {k: config_copy(v) for k, v in config.items()}
+ elif isinstance(config, list):
+ return [config_copy(v) for v in config]
+ else:
+ return deepcopy(config)
+
+
+def parse_command(params, key, default):
+ result = default
+ for _i, _v in enumerate(params):
+ if _v.split("=")[0].strip() == key:
+ result = _v[_v.index('=') + 1:].strip()
+ break
+ return result
+
+
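+# Example invocation (a sketch; the config names are placeholders that must exist under
+# src/config/algs and src/config/envs, and "with key=value" is Sacred's override syntax):
+#   python src/main.py --config=<alg_yaml> --env-config=<env_yaml> with env_args.map_name=<map> seed=1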
+if __name__ == '__main__':
+ params = deepcopy(sys.argv)
+
+ # Get the defaults from default.yaml
+ with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
+ try:
+ config_dict = yaml.load(f, Loader=yaml.SafeLoader)
+ except yaml.YAMLError as exc:
+ assert False, "default.yaml error: {}".format(exc)
+
+ # Load algorithm and env base configs
+ env_config = _get_config(params, "--env-config", "envs")
+ alg_config = _get_config(params, "--config", "algs")
+ # config_dict = {**config_dict, **env_config, **alg_config}
+ config_dict = recursive_dict_update(config_dict, env_config)
+ config_dict = recursive_dict_update(config_dict, alg_config)
+
+ # now add all the config to sacred
+ ex.add_config(config_dict)
+
+ # Save to disk by default for sacred
+ map_name = parse_command(params, "env_args.map_name", config_dict['env_args']['map_name'])
+ algo_name = parse_command(params, "name", config_dict['name'])
+ local_results_path = parse_command(params, "local_results_path", config_dict['local_results_path'])
+ file_obs_path = join(results_path, local_results_path, "sacred", map_name, algo_name)
+
+ logger.info("Saving to FileStorageObserver in {}.".format(file_obs_path))
+ ex.observers.append(FileStorageObserver.create(file_obs_path))
+
+ ex.run_commandline(params)
+
+ # flush
+ sys.stdout.flush()
diff --git a/src/modules/__init__.py b/src/modules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/modules/agents/FeUdal_agent.py b/src/modules/agents/FeUdal_agent.py
new file mode 100644
index 0000000..0295c55
--- /dev/null
+++ b/src/modules/agents/FeUdal_agent.py
@@ -0,0 +1,92 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torch
+
+class Feudal_ManagerAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(Feudal_ManagerAgent, self).__init__()
+ self.args = args
+
+ # Manager network
+ self.manager_fc1 = nn.Linear(input_shape, args.manager_hidden_dim)
+ #self.manager_rnn = nn.GRUCell(args.manager_hidden_dim, args.manager_hidden_dim)
+ self.manager_rnn = nn.LSTMCell(args.manager_hidden_dim, args.manager_hidden_dim)
+ self.manager_fc2 = nn.Linear(args.manager_hidden_dim, args.state_dim)
+
+ # Goal generation
+ self.goal_network = nn.Linear(args.manager_hidden_dim, args.state_dim)
+
+ # State-value estimate V_t^M
+ self.value_network = nn.Linear(args.manager_hidden_dim, 1)
+
+ def init_hidden(self):
+ # Initialize hidden and cell states for the manager
+ manager_hidden = self.manager_fc1.weight.new(1, self.args.manager_hidden_dim).zero_()
+ manager_cell = self.manager_fc1.weight.new(1, self.args.manager_hidden_dim).zero_()
+ return (manager_hidden, manager_cell)
+
+ def forward(self, inputs, hidden):
+ x = F.relu(self.manager_fc1(inputs))
+ h_in, c_in = hidden
+ h_in = h_in.reshape(-1, self.args.manager_hidden_dim)
+ c_in = c_in.reshape(-1, self.args.manager_hidden_dim)
+ h, c = self.manager_rnn(x, (h_in, c_in))
+
+ # Generate the goal
+ goal = self.goal_network(h)
+
+ # Estimate the state value
+ value = self.value_network(h)
+
+ return goal, value, (h, c)
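+ # Expected output shapes, assuming inputs of shape [bs * n_agents, input_shape]:
+ # goal -> [bs * n_agents, state_dim], value -> [bs * n_agents, 1],
+ # (h, c) -> two tensors of shape [bs * n_agents, manager_hidden_dim].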
+
+class Feudal_WorkerAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(Feudal_WorkerAgent, self).__init__()
+ self.args = args
+
+ # Worker network
+ self.worker_fc1 = nn.Linear(input_shape, args.worker_hidden_dim)
+ self.worker_rnn = nn.LSTMCell(args.worker_hidden_dim, args.worker_hidden_dim)
+
+ # U_t: Action embedding matrix (1x16)
+ self.U_embedding = nn.Linear(args.worker_hidden_dim, args.embedding_dim)
+
+ # w_t: advantage direction / weights (1x16)
+ self.w_network = nn.Linear(args.state_dim, args.embedding_dim)
+
+ # Final Q-value output
+ self.q_network = nn.Linear(args.embedding_dim, args.n_actions)
+
+ def init_hidden(self):
+ # Initialize hidden and cell states for the worker
+ worker_hidden = self.worker_fc1.weight.new(1, self.args.worker_hidden_dim).zero_()
+ worker_cell = self.worker_fc1.weight.new(1, self.args.worker_hidden_dim).zero_()
+ return (worker_hidden, worker_cell)
+
+ def forward(self, inputs, worker_hidden, single_past_goals, batch_past_goals, goal):
+ # Worker RNN
+ x = F.relu(self.worker_fc1(inputs))
+ h_in, c_in = worker_hidden
+ h_in = h_in.reshape(-1, self.args.worker_hidden_dim)
+ c_in = c_in.reshape(-1, self.args.worker_hidden_dim)
+ h, c = self.worker_rnn(x, (h_in, c_in))
+
+ # Generate U_t (action embedding matrix)
+ U_t = self.U_embedding(h)
+
+ # Generate w_t (advantage direction)
+ w_t = self.w_network(goal)
+
+ # Compute s_t (weighted state)
+ s_t = U_t * w_t # element-wise multiplication
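+ # Note: the original FeUdal Networks worker scores actions via a matrix product U_t w_t over
+ # per-action embeddings; here U_t and w_t are single embedding_dim vectors combined
+ # element-wise and passed through a linear head, which is a simplified variant.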
+
+ # Compute Q-values
+ q = self.q_network(s_t)
+
+ return q, (h, c), single_past_goals, batch_past_goals
+
+
+
+
+
diff --git a/src/modules/agents/__init__.py b/src/modules/agents/__init__.py
new file mode 100644
index 0000000..ee9ac85
--- /dev/null
+++ b/src/modules/agents/__init__.py
@@ -0,0 +1,23 @@
+REGISTRY = {}
+
+from .hpn_rnn_agent import HPN_RNNAgent
+from .hpns_rnn_agent import HPNS_RNNAgent
+from .asn_rnn_agent import AsnRNNAgent
+from .deepset_hyper_rnn_agent import DeepSetHyperRNNAgent
+from .deepset_rnn_agent import DeepSetRNNAgent
+from .gnn_rnn_agent import GnnRNNAgent
+from .n_rnn_agent import NRNNAgent
+from .rnn_agent import RNNAgent
+from .updet_agent import UPDeT
+from .FeUdal_agent import Feudal_ManagerAgent
+
+REGISTRY["rnn"] = RNNAgent
+REGISTRY["n_rnn"] = NRNNAgent
+REGISTRY["hpn_rnn"] = HPN_RNNAgent
+REGISTRY["hpns_rnn"] = HPNS_RNNAgent
+REGISTRY["deepset_rnn"] = DeepSetRNNAgent
+REGISTRY["deepset_hyper_rnn"] = DeepSetHyperRNNAgent
+REGISTRY["updet_agent"] = UPDeT
+REGISTRY["asn_rnn"] = AsnRNNAgent
+REGISTRY["gnn_rnn"] = GnnRNNAgent
+REGISTRY["feudal_agent"] = Feudal_ManagerAgent
diff --git a/src/modules/agents/asn_rnn_agent.py b/src/modules/agents/asn_rnn_agent.py
new file mode 100644
index 0000000..cb307cd
--- /dev/null
+++ b/src/modules/agents/asn_rnn_agent.py
@@ -0,0 +1,77 @@
+import json
+
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+def read_json(path):
+ with open(path, 'rb') as f:
+ data = json.load(f)
+ return data
+
+
+class AsnRNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(AsnRNNAgent, self).__init__()
+ self.args = args
+
+ print('#' * 18)
+ print('using asn')
+ print(args)
+ print('#' * 18)
+
+ self.map_name = args.env_args['map_name'] + '_obs'
+ # config path
+ map_config = read_json('./obs_config.json')
+
+ assert self.map_name in map_config, 'map config not found'
+ assert input_shape == map_config[self.map_name]['model_input_size'], 'input shape mismatch'
+
+ self.enemies_feat_start = map_config[self.map_name]['model_input_compose']['0']['size']
+ self.enemies_num, self.enemy_feats_size = map_config[self.map_name]['model_input_compose']['1']['size']
+
+ # network struct
+ self.env_info_fc1 = nn.Linear(input_shape, args.asn_hidden_size)
+ self.env_info_fc2 = nn.Linear(args.asn_hidden_size, args.asn_hidden_size)
+ self.env_info_rnn3 = nn.GRUCell(args.asn_hidden_size, args.asn_hidden_size)
+
+ # no-op + stop + up, down, left, right
+ self.wo_action_fc = nn.Linear(args.asn_hidden_size, 6)
+
+
+ self.enemies_info_fc1 = nn.Linear(self.enemy_feats_size, args.asn_hidden_size)
+ self.enemies_info_fc2 = nn.Linear(args.asn_hidden_size, args.asn_hidden_size)
+ self.enemies_info_rnn3 = nn.GRUCell(args.asn_hidden_size, args.asn_hidden_size)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.env_info_fc1.weight.new(1, self.args.asn_hidden_size * (1 + self.enemies_num)).zero_()
+
+ def forward(self, inputs, hidden_state):
+ b, a, e = inputs.size()
+ inputs = inputs.view(-1, e)
+
+ if hidden_state is not None:
+ hidden_state = hidden_state.reshape(-1, self.args.asn_hidden_size * (1 + self.enemies_num))
+
+ enemies_feats = [inputs[:, self.enemies_feat_start + i * self.enemy_feats_size: self.enemies_feat_start + self.enemy_feats_size * (1 + i)] for i in range(self.enemies_num)]
+
+ h_in = th.split(hidden_state, self.args.asn_hidden_size, dim=-1)
+ h_in_env = h_in[0].reshape(-1, self.args.asn_hidden_size)
+ h_in_enemies = [_h.reshape(-1, self.args.asn_hidden_size) for _h in h_in[1:]]
+
+ env_hidden_1 = F.relu(self.env_info_fc1(inputs))
+ env_hidden_2 = self.env_info_fc2(env_hidden_1)
+ h_env = self.env_info_rnn3(env_hidden_2, h_in_env)
+
+ wo_action_fc_Q = self.wo_action_fc(h_env)
+
+ enemies_hiddent_1 = [F.relu(self.enemies_info_fc1(enemy_info)) for enemy_info in enemies_feats]
+ enemies_hiddent_2 = [self.enemies_info_fc2(enemy_info) for enemy_info in enemies_hiddent_1]
+ enemies_h_hiddent_3 = [self.enemies_info_rnn3(enemy_info, enemy_h) for enemy_info, enemy_h in zip(enemies_hiddent_2, h_in_enemies)]
+
+ attack_enemy_id_Q = [th.sum(h_env * enemy_info, dim=-1, keepdim=True) for enemy_info in enemies_h_hiddent_3]
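+ # The attack-action Q-value for enemy i is the inner product <h_env, h_enemy_i>, so the
+ # attack outputs stay aligned with the enemy ordering and scale with the number of enemies.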
+
+ q = th.cat([wo_action_fc_Q, *attack_enemy_id_Q], dim=-1)
+ hidden_state = th.cat([h_env, *enemies_h_hiddent_3], dim=-1)
+ return q.view(b, a, -1), hidden_state.view(b, a, -1)
diff --git a/src/modules/agents/deepset_hyper_rnn_agent.py b/src/modules/agents/deepset_hyper_rnn_agent.py
new file mode 100644
index 0000000..ef09b6a
--- /dev/null
+++ b/src/modules/agents/deepset_hyper_rnn_agent.py
@@ -0,0 +1,100 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class DeepSetHyperRNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(DeepSetHyperRNNAgent, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_allies = args.n_allies
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+
+ # [4 + 1, (6, 5), (4, 5)], take 5m_vs_6m for example
+ self.own_feats_dim, self.enemy_feats_dim, self.ally_feats_dim = input_shape
+ self.enemy_feats_dim = self.enemy_feats_dim[-1]
+ self.ally_feats_dim = self.ally_feats_dim[-1]
+
+ if self.args.obs_agent_id:
+ # embedding table for agent_id
+ self.agent_id_embedding = th.nn.Embedding(self.n_agents, self.args.rnn_hidden_dim)
+
+ if self.args.obs_last_action:
+ # embedding table for action id
+ self.action_id_embedding = th.nn.Embedding(self.n_actions, self.args.rnn_hidden_dim)
+
+ # Own features
+ self.fc1_own = nn.Linear(self.own_feats_dim, args.rnn_hidden_dim, bias=True) # only one bias is OK
+
+ # Ally features
+ self.fc1_ally = nn.Linear(self.ally_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+
+ # Enemy features
+ self.fc1_enemy = nn.Linear(self.enemy_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+
+ self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
+
+ self.fc2_normal_actions = nn.Linear(args.rnn_hidden_dim, args.output_normal_actions) # (no_op, stop, up, down, right, left)
+ # Multiple entities (use hyper net to process these features to ensure permutation invariant)
+ self.hyper_fc2_w_and_b_attack_actions = nn.Sequential(
+ nn.Linear(self.enemy_feats_dim, args.hpn_hyper_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hpn_hyper_dim, args.rnn_hidden_dim * 1 + 1)
+ ) # output shape: rnn_hidden_dim * 1 + 1
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1_own.weight.new(1, self.args.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state=None):
+ # [bs * n_agents, mv_fea_dim+own_fea_dim], [bs * n_agents * n_enemies, enemy_fea_dim], [bs * n_agents * n_allies, ally_fea_dim]
+ bs, own_feats_t, enemy_feats_t, ally_feats_t, embedding_indices = inputs
+
+ # (1) Own feature
+ embedding_own = self.fc1_own(own_feats_t.reshape(-1, self.own_feats_dim))
+ # (2) ID embeddings
+ if self.args.obs_agent_id:
+ agent_indices = embedding_indices[0]
+ embedding_own = embedding_own + self.agent_id_embedding(agent_indices).view(-1, self.args.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ last_action_indices = embedding_indices[-1]
+ if last_action_indices is not None: # t != 0
+ embedding_own = embedding_own + self.action_id_embedding(last_action_indices).view(
+ -1, self.args.rnn_hidden_dim)
+
+ # (3) Enemy feature
+ embedding_enemies = self.fc1_enemy(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_enemies, rnn_hidden_dim]
+ embedding_enemies = embedding_enemies.sum(dim=1, keepdim=False) # [bs * n_agents, rnn_hidden_dim]
+
+ # (4) Ally features
+ embedding_allies = self.fc1_ally(ally_feats_t).view(
+ bs * self.n_agents, self.n_allies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_allies, rnn_hidden_dim]
+ embedding_allies = embedding_allies.sum(dim=1, keepdim=False) # [bs * n_agents, rnn_hidden_dim]
+ aggregated_embedding = embedding_own + embedding_enemies + embedding_allies # [bs * n_agents, rnn_hidden_dim]
+
+ x = F.relu(aggregated_embedding, inplace=True)
+ if hidden_state is not None:
+ hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
+ h = self.rnn(x, hidden_state)
+
+ # Q-values of normal actions
+ q_normal = self.fc2_normal_actions(h) # [bs * n_agents, 6]
+
+ # Q-values of attack actions
+ fc2_w_and_b_attack = self.hyper_fc2_w_and_b_attack_actions(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies, self.args.rnn_hidden_dim + 1
+ ).transpose(-2, -1) # [bs*n_agents, n_enemies, rnn_hidden_dim+1] -> [bs*n_agents, rnn_hidden_dim+1, n_enemies]
+ fc2_w_attack = fc2_w_and_b_attack[:, :-1] # [bs * n_agents, rnn_hidden_dim, n_enemies]
+ fc2_b_attack = fc2_w_and_b_attack[:, -1] # [bs * n_agents, n_enemies]
+ # [bs * n_agents, 1, rnn_hidden_dim] * [bs * n_agents, rnn_hidden_dim, n_enemies] = [bs * n_agents, 1, n_enemies]
+ q_attack = th.matmul(h.unsqueeze(1), fc2_w_attack).squeeze(1) + fc2_b_attack # [bs * n_agents, n_enemies]
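+ # The hypernetwork maps each enemy's raw features to that enemy's output weights and bias,
+ # so q_attack[:, i] = <h, W(enemy_i)> + b(enemy_i); permuting the enemies permutes the
+ # attack Q-values in the same way (permutation equivariance of the attack head).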
+
+ # Concat 2 types of Q-values
+ q = th.cat((q_normal, q_attack), dim=1) # [bs * n_agents, 6 + n_enemies]
+
+ return q.view(bs, self.n_agents, -1), h.view(bs, self.n_agents, -1)
\ No newline at end of file
diff --git a/src/modules/agents/deepset_rnn_agent.py b/src/modules/agents/deepset_rnn_agent.py
new file mode 100644
index 0000000..8e642ea
--- /dev/null
+++ b/src/modules/agents/deepset_rnn_agent.py
@@ -0,0 +1,81 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class DeepSetRNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(DeepSetRNNAgent, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_allies = args.n_allies
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+
+ # [4 + 1, (6, 5), (4, 5)], take 5m_vs_6m for example
+ self.own_feats_dim, self.enemy_feats_dim, self.ally_feats_dim = input_shape
+ self.enemy_feats_dim = self.enemy_feats_dim[-1]
+ self.ally_feats_dim = self.ally_feats_dim[-1]
+
+ if self.args.obs_agent_id:
+ # embedding table for agent_id
+ self.agent_id_embedding = th.nn.Embedding(self.n_agents, self.args.rnn_hidden_dim)
+
+ if self.args.obs_last_action:
+ # embedding table for action id
+ self.action_id_embedding = th.nn.Embedding(self.n_actions, self.args.rnn_hidden_dim)
+
+ # Own features
+ self.fc1_own = nn.Linear(self.own_feats_dim, args.rnn_hidden_dim, bias=True) # only one bias is OK
+
+ # Ally features
+ self.fc1_ally = nn.Linear(self.ally_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+
+ # Enemy features
+ self.fc1_enemy = nn.Linear(self.enemy_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+
+ self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
+ self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
+
+ # print(self.fc1_ally.weight.data.mean(), self.fc1_ally.weight.data.var())
+ # print(self.fc1_enemy.weight.data.mean(), self.fc1_enemy.weight.data.var())
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1_own.weight.new(1, self.args.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state=None):
+ # [bs * n_agents, mv_fea_dim+own_fea_dim], [bs * n_agents * n_enemies, enemy_fea_dim], [bs * n_agents * n_allies, ally_fea_dim]
+ bs, own_feats_t, enemy_feats_t, ally_feats_t, embedding_indices = inputs
+
+ # (1) Own feature
+ embedding_own = self.fc1_own(own_feats_t.reshape(-1, self.own_feats_dim))
+ # (2) ID embeddings
+ if self.args.obs_agent_id:
+ agent_indices = embedding_indices[0]
+ embedding_own = embedding_own + self.agent_id_embedding(agent_indices).view(-1, self.args.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ last_action_indices = embedding_indices[-1]
+ if last_action_indices is not None: # t != 0
+ embedding_own = embedding_own + self.action_id_embedding(last_action_indices).view(
+ -1, self.args.rnn_hidden_dim)
+
+ # (3) Enemy feature
+ embedding_enemies = self.fc1_enemy(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_enemies, rnn_hidden_dim]
+ embedding_enemies = embedding_enemies.sum(dim=1, keepdim=False) # [bs * n_agents, rnn_hidden_dim]
+
+ # (4) Ally features
+ embedding_allies = self.fc1_ally(ally_feats_t).view(
+ bs * self.n_agents, self.n_allies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_allies, rnn_hidden_dim]
+ embedding_allies = embedding_allies.sum(dim=1, keepdim=False) # [bs * n_agents, rnn_hidden_dim]
+ aggregated_embedding = embedding_own + embedding_enemies + embedding_allies # [bs * n_agents, rnn_hidden_dim]
+
+ x = F.relu(aggregated_embedding, inplace=True)
+ if hidden_state is not None:
+ hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
+ h = self.rnn(x, hidden_state)
+ q = self.fc2(h)
+ return q.view(bs, self.n_agents, -1), h.view(bs, self.n_agents, -1)
\ No newline at end of file
diff --git a/src/modules/agents/gnn_rnn_agent.py b/src/modules/agents/gnn_rnn_agent.py
new file mode 100644
index 0000000..21241ab
--- /dev/null
+++ b/src/modules/agents/gnn_rnn_agent.py
@@ -0,0 +1,156 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class GraphConvLayer(nn.Module):
+ """Implements a GCN layer."""
+
+ def __init__(self, input_dim, output_dim, n_nodes):
+ super(GraphConvLayer, self).__init__()
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ self.n_nodes = n_nodes
+
+ self.lin_layer_neighbor = nn.Linear(input_dim, output_dim)
+ self.lin_layer_self = nn.Linear(input_dim, output_dim)
+
+ def forward(self, inputs):
+ input_feature, adjacent_matrix = inputs
+ # [N, N] * [bs, N, fea_dim]
+ neighbors = th.matmul(adjacent_matrix, self.lin_layer_neighbor(input_feature)) # sum aggregation
+ neighbors = F.relu(neighbors, inplace=True)
+
+ node_feats = self.lin_layer_self(input_feature) # node features
+ node_feats = F.relu(node_feats, inplace=True)
+ out = (node_feats + neighbors) / self.n_nodes # mean
+ return out
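+ # In matrix form this layer computes out = (relu(X W_self) + relu(A X W_nb)) / n_nodes,
+ # i.e. a GCN-style sum aggregation over neighbours (A is all-ones minus the identity for
+ # the fully connected graph registered in GNN) followed by a mean-style rescaling.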
+
+ def __repr__(self):
+ return self.__class__.__name__ + ' (' \
+ + str(self.input_dim) + ' -> ' \
+ + str(self.output_dim) + ')'
+
+
+class GNN(nn.Module):
+ """
+ A graph net used to pre-process the per-entity input features and resolve the entity-ordering issue.
+ gnn_rnn 35.404K for 5m_vs_6m
+ """
+
+ def __init__(self, fea_dim, n_nodes, hidden_size, layer_num=2, out_pool_type='avg'):
+ super(GNN, self).__init__()
+ self.fea_dim = fea_dim
+ self.n_nodes = n_nodes
+ self.hidden_size = hidden_size
+ self.layer_num = layer_num
+ self.out_pool_type = out_pool_type
+
+ # Adjacent Matrix, assumes a fully connected graph.
+ self.register_buffer('adj', (th.ones(n_nodes, n_nodes) - th.eye(n_nodes)))
+
+ # GNNs
+ GNN_layers = []
+ previous_out_dim = fea_dim
+ for _ in range(self.layer_num):
+ GNN_layers.append(GraphConvLayer(input_dim=previous_out_dim, output_dim=hidden_size, n_nodes=n_nodes))
+ previous_out_dim = hidden_size
+ self.gnn_layers = nn.Sequential(*GNN_layers)
+
+ def forward(self, x):
+ # GNNs
+ out = self.gnn_layers([x, self.adj])
+
+ # Pooling
+ if self.out_pool_type == 'avg':
+ ret = out.mean(dim=1, keepdim=False) # Pooling over the node dimension.
+ elif self.out_pool_type == 'max':
+ ret, _ = out.max(dim=1, keepdim=False)
+ else:
+ raise NotImplementedError
+ return ret
+
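+# Shape sketch (illustrative sizes only):
+#   gnn = GNN(fea_dim=64, n_nodes=10, hidden_size=64, layer_num=2)
+#   x = th.randn(32, 10, 64)  # [bs * n_agents, n_nodes, fea_dim]
+#   y = gnn(x)                # [bs * n_agents, hidden_size], pooled over the node dimension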
+
+class GnnRNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(GnnRNNAgent, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_allies = args.n_allies
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+
+ # [4 + 1, (6, 5), (4, 5)], take 5m_vs_6m for example
+ self.own_feats_dim, self.enemy_feats_dim, self.ally_feats_dim = input_shape
+ self.enemy_feats_dim = self.enemy_feats_dim[-1]
+ self.ally_feats_dim = self.ally_feats_dim[-1]
+
+ # (1) To transform all kinds of features into the same dimension.
+ if self.args.obs_agent_id:
+ # embedding table for agent_id
+ self.agent_id_embedding = th.nn.Embedding(self.n_agents, self.args.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ # embedding table for action id
+ self.action_id_embedding = th.nn.Embedding(self.n_actions, self.args.rnn_hidden_dim)
+ # Own features
+ self.fc1_own = nn.Linear(self.own_feats_dim, args.rnn_hidden_dim, bias=True) # only one bias is OK
+ # Ally features
+ self.fc1_ally = nn.Linear(self.ally_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+ # Enemy features
+ self.fc1_enemy = nn.Linear(self.enemy_feats_dim, args.rnn_hidden_dim, bias=False) # only one bias is OK
+
+ # (2) GNN
+ self.gnn = GNN(fea_dim=args.rnn_hidden_dim, n_nodes=self.n_agents + self.n_enemies,
+ hidden_size=args.rnn_hidden_dim, layer_num=args.gnn_layer_num, out_pool_type='avg')
+
+ # (3) RNN and output
+ self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
+ self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
+
+ # print(self.fc1_ally.weight.data.mean(), self.fc1_ally.weight.data.var())
+ # print(self.fc1_enemy.weight.data.mean(), self.fc1_enemy.weight.data.var())
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1_own.weight.new(1, self.args.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state=None):
+ # [bs * n_agents, mv_fea_dim+own_fea_dim], [bs * n_agents * n_enemies, enemy_fea_dim], [bs * n_agents * n_allies, ally_fea_dim]
+ bs, own_feats_t, enemy_feats_t, ally_feats_t, embedding_indices = inputs
+
+ # %%%%%%%%%% To transform all kinds of features into the same dimension. %%%%%%%%%%
+ # (1) Own feature
+ embedding_own = self.fc1_own(own_feats_t) # [bs * n_agents, rnn_hidden_dim]
+ # (2) ID embeddings
+ if self.args.obs_agent_id:
+ agent_indices = embedding_indices[0]
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.agent_id_embedding(agent_indices).view(-1, self.args.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ last_action_indices = embedding_indices[-1]
+ if last_action_indices is not None: # t != 0
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.action_id_embedding(last_action_indices).view(
+ -1, self.args.rnn_hidden_dim)
+ embedding_own = embedding_own.unsqueeze(dim=1) # [bs * n_agents, 1, rnn_hidden_dim]
+ # (3) Enemy feature
+ embedding_enemies = self.fc1_enemy(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_enemies, rnn_hidden_dim]
+ # (4) Ally features
+ embedding_allies = self.fc1_ally(ally_feats_t).view(
+ bs * self.n_agents, self.n_allies, self.args.rnn_hidden_dim
+ ) # [bs * n_agents, n_allies, rnn_hidden_dim]
+
+ # [bs * n_agents, 1+n_allies+n_enemies, rnn_hidden_dim]
+ fea_embeddings = th.cat([embedding_own, embedding_allies, embedding_enemies], dim=1)
+ fea_embeddings = F.relu(fea_embeddings, inplace=True)
+ # %%%%%%%%%% To transform all kinds of features into the same dimension. %%%%%%%%%%
+
+ x = self.gnn(fea_embeddings) # [bs * n_agents, rnn_hidden_dim]
+
+ if hidden_state is not None:
+ hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
+ h = self.rnn(x, hidden_state)
+ q = self.fc2(h)
+ return q.view(bs, self.n_agents, -1), h.view(bs, self.n_agents, -1)
diff --git a/src/modules/agents/hpn_rnn_agent.py b/src/modules/agents/hpn_rnn_agent.py
new file mode 100644
index 0000000..e550b64
--- /dev/null
+++ b/src/modules/agents/hpn_rnn_agent.py
@@ -0,0 +1,244 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.parameter import Parameter
+
+
+def get_activation_func(name, hidden_dim):
+ """
+ Return the activation module matching `name`.
+ Supported names: 'relu', 'tanh', 'leaky_relu', 'elu', 'prelu'.
+ :param name: name of the activation function
+ :param hidden_dim: number of channels, only used by 'prelu'
+ :return: an nn.Module instance
+ """
+ if name == "relu":
+ return nn.ReLU(inplace=True)
+ elif name == "tanh":
+ return nn.Tanh()
+ elif name == "leaky_relu":
+ return nn.LeakyReLU(negative_slope=0.01, inplace=True)
+ elif name == "elu":
+ return nn.ELU(alpha=1., inplace=True)
+ elif name == 'prelu':
+ return nn.PReLU(num_parameters=hidden_dim, init=0.25)
+ else:
+ raise ValueError("Unsupported activation function: {}".format(name))
+
+
+class Hypernet(nn.Module):
+ def __init__(self, input_dim, hidden_dim, main_input_dim, main_output_dim, activation_func, n_heads):
+ super(Hypernet, self).__init__()
+
+ self.n_heads = n_heads
+ # the output dim of the hypernet
+ output_dim = main_input_dim * main_output_dim
+ # the output of the hypernet will be reshaped to [main_input_dim, main_output_dim]
+ self.main_input_dim = main_input_dim
+ self.main_output_dim = main_output_dim
+
+ self.multihead_nn = nn.Sequential(
+ nn.Linear(input_dim, hidden_dim),
+ get_activation_func(activation_func, hidden_dim),
+ nn.Linear(hidden_dim, output_dim * self.n_heads),
+ )
+
+ def forward(self, x):
+ # [..., main_output_dim + main_output_dim + ... + main_output_dim]
+ # [bs, main_input_dim, n_heads * main_output_dim]
+ return self.multihead_nn(x).view([-1, self.main_input_dim, self.main_output_dim * self.n_heads])
+
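+# Shape sketch (illustrative sizes only): a Hypernet(main_input_dim=5, main_output_dim=64, n_heads=2)
+# maps per-entity features [bs, 5] to per-entity weight matrices [bs, 5, 128], which the agent then
+# applies to the same features via th.matmul in its forward().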
+
+class Merger(nn.Module):
+ def __init__(self, head, fea_dim):
+ super(Merger, self).__init__()
+ self.head = head
+ if head > 1:
+ self.weight = Parameter(th.Tensor(1, head, fea_dim).fill_(1.))
+ self.softmax = nn.Softmax(dim=1)
+
+ def forward(self, x):
+ """
+ :param x: [bs, n_head, fea_dim]
+ :return: [bs, fea_dim]
+ """
+ if self.head > 1:
+ return th.sum(self.softmax(self.weight) * x, dim=1, keepdim=False)
+ else:
+ return th.squeeze(x, dim=1)
+
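+# Usage sketch (illustrative sizes only): Merger(head=4, fea_dim=64) maps [bs, 4, 64] -> [bs, 64]
+# via a softmax-weighted sum over the head dimension; with head=1 it simply squeezes that dimension.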
+
+class HPN_RNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(HPN_RNNAgent, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_allies = args.n_allies
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+ self.n_heads = args.hpn_head_num
+ self.rnn_hidden_dim = args.rnn_hidden_dim
+
+ # [4 + 1, (6, 5), (4, 5)]
+ self.own_feats_dim, self.enemy_feats_dim, self.ally_feats_dim = input_shape
+ self.enemy_feats_dim = self.enemy_feats_dim[-1] # [n_enemies, feat_dim]
+ self.ally_feats_dim = self.ally_feats_dim[-1] # [n_allies, feat_dim]
+
+ if self.args.obs_agent_id:
+ # embedding table for agent_id
+ self.agent_id_embedding = th.nn.Embedding(self.n_agents, self.rnn_hidden_dim)
+
+ if self.args.obs_last_action:
+ # embedding table for action id
+ self.action_id_embedding = th.nn.Embedding(self.n_actions, self.rnn_hidden_dim)
+
+ # Unique Features (do not need hyper net)
+ self.fc1_own = nn.Linear(self.own_feats_dim, self.rnn_hidden_dim, bias=True) # only one bias is OK
+
+ # %%%%%%%%%%%%%%%%%%%%%% Hypernet-based API input layer %%%%%%%%%%%%%%%%%%%%
+ # Multiple entities (use hyper net to process these features to ensure permutation invariant)
+ self.hyper_input_w_enemy = Hypernet(
+ input_dim=self.enemy_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=self.enemy_feats_dim, main_output_dim=self.rnn_hidden_dim,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: (enemy_feats_dim * self.rnn_hidden_dim)
+ self.hyper_input_w_ally = Hypernet(
+ input_dim=self.ally_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=self.ally_feats_dim, main_output_dim=self.rnn_hidden_dim,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: ally_feats_dim * rnn_hidden_dim
+
+ # self.unify_input_heads = nn.Linear(self.rnn_hidden_dim * self.n_heads, self.rnn_hidden_dim)
+ self.unify_input_heads = Merger(self.n_heads, self.rnn_hidden_dim)
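+ # The weights above are generated per entity from that entity's own features, and the resulting
+ # embeddings are summed over entities in forward(), which keeps the input layer permutation-invariant.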
+
+ self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)
+
+ self.output_normal_actions = nn.Linear(self.rnn_hidden_dim, args.output_normal_actions) # (no_op, stop, up, down, right, left)
+
+ # %%%%%%%%%%%%%%%%%%%%%% Hypernet-based APE output layer %%%%%%%%%%%%%%%%%%%%
+ # Multiple entities (use hyper net to process these features to ensure permutation invariant)
+ self.hyper_output_w_attack_action = Hypernet(
+ input_dim=self.enemy_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=self.rnn_hidden_dim, main_output_dim=1,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: rnn_hidden_dim * 1
+ self.hyper_output_b_attack_action = Hypernet(
+ input_dim=self.enemy_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=1, main_output_dim=1,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: 1
+ # self.unify_output_heads = nn.Linear(self.n_heads, 1)
+ self.unify_output_heads = Merger(self.n_heads, 1)
+
+ if self.args.map_type == "MMM":
+ assert self.n_enemies >= self.n_agents, "For MMM maps, n_enemies must be >= n_agents because the 'attack' and 'rescue' actions share the same output ids in SMAC"
+ self.hyper_output_w_rescue_action = Hypernet(
+ input_dim=self.ally_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=self.rnn_hidden_dim, main_output_dim=1,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: rnn_hidden_dim * 1
+ self.hyper_output_b_rescue_action = Hypernet(
+ input_dim=self.ally_feats_dim, hidden_dim=args.hpn_hyper_dim,
+ main_input_dim=1, main_output_dim=1,
+ activation_func=args.hpn_hyper_activation, n_heads=self.n_heads
+ ) # output shape: 1
+ self.unify_rescue_output_heads = Merger(self.n_heads, 1)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1_own.weight.new(1, self.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state):
+ # [bs * n_agents, mv_fea_dim], [bs * n_agents * n_enemies, enemy_fea_dim], [bs * n_agents * n_allies, ally_fea_dim]
+ bs, own_feats_t, enemy_feats_t, ally_feats_t, embedding_indices = inputs
+
+ # (1) Own feature
+ embedding_own = self.fc1_own(own_feats_t) # [bs * n_agents, rnn_hidden_dim]
+
+ # (2) ID embeddings
+ if self.args.obs_agent_id:
+ agent_indices = embedding_indices[0]
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.agent_id_embedding(agent_indices).view(
+ -1, self.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ last_action_indices = embedding_indices[-1]
+ if last_action_indices is not None: # t != 0
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.action_id_embedding(last_action_indices).view(
+ -1, self.rnn_hidden_dim)
+
+ # (3) Enemy feature: [bs * n_agents * n_enemies, enemy_fea_dim] -> [bs * n_agents * n_enemies, enemy_feats_dim, rnn_hidden_dim * n_heads]
+ input_w_enemy = self.hyper_input_w_enemy(enemy_feats_t)
+ # [bs * n_agents * n_enemies, 1, enemy_fea_dim] * [bs * n_agents * n_enemies, enemy_fea_dim, rnn_hidden_dim * head] = [bs * n_agents * n_enemies, 1, rnn_hidden_dim * head]
+ embedding_enemies = th.matmul(enemy_feats_t.unsqueeze(1), input_w_enemy).view(
+ bs * self.n_agents, self.n_enemies, self.n_heads, self.rnn_hidden_dim
+ ) # [bs * n_agents, n_enemies, n_head, rnn_hidden_dim]
+ embedding_enemies = embedding_enemies.sum(dim=1, keepdim=False) # [bs * n_agents, n_head, rnn_hidden_dim]
+
+ # (4) Ally features: [bs * n_agents * n_allies, ally_fea_dim] -> [bs * n_agents * n_allies, ally_feats_dim, rnn_hidden_dim * n_heads]
+ input_w_ally = self.hyper_input_w_ally(ally_feats_t)
+ # [bs * n_agents * n_allies, 1, ally_fea_dim] * [bs * n_agents * n_allies, ally_fea_dim, rnn_hidden_dim * head] = [bs * n_agents * n_allies, 1, rnn_hidden_dim * head]
+ embedding_allies = th.matmul(ally_feats_t.unsqueeze(1), input_w_ally).view(
+ bs * self.n_agents, self.n_allies, self.n_heads, self.rnn_hidden_dim
+ ) # [bs * n_agents, n_allies, n_head, rnn_hidden_dim]
+ embedding_allies = embedding_allies.sum(dim=1, keepdim=False) # [bs * n_agents, n_head, rnn_hidden_dim]
+ # Final embedding: merge the multiple heads into one -> [bs * n_agents, rnn_hidden_dim]
+ embedding = embedding_own + self.unify_input_heads(
+ embedding_enemies + embedding_allies
+ )
+
+ x = F.relu(embedding, inplace=True)
+ h_in = hidden_state.reshape(-1, self.rnn_hidden_dim)
+ hh = self.rnn(x, h_in) # [bs * n_agents, rnn_hidden_dim]
+
+ # Q-values of normal actions
+ q_normal = self.output_normal_actions(hh).view(bs, self.n_agents, -1) # [bs, n_agents, 6]
+
+ # Q-values of attack actions: [bs * n_agents * n_enemies, enemy_fea_dim] -> [bs * n_agents * n_enemies, rnn_hidden_dim, 1 * n_heads]
+ output_w_attack = self.hyper_output_w_attack_action(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies, self.rnn_hidden_dim, self.n_heads
+ ).transpose(1, 2).reshape( # -> [bs * n_agents, rnn_hidden_dim, n_enemies, n_heads]
+ bs * self.n_agents, self.rnn_hidden_dim, self.n_enemies * self.n_heads
+ ) # [bs * n_agents, rnn_hidden_dim, n_enemies * heads]
+ # b: [bs * n_agents * n_enemies, enemy_fea_dim] -> [bs * n_agents * n_enemies, 1, n_heads]
+ output_b_attack = self.hyper_output_b_attack_action(enemy_feats_t).view(
+ bs * self.n_agents, self.n_enemies * self.n_heads
+ ) # -> [bs * n_agents, n_enemies * head]
+
+ # [bs * n_agents, 1, rnn_hidden_dim] * [bs * n_agents, rnn_hidden_dim, n_enemies * head] = [bs * n_agents, 1, n_enemies * head]
+ # -> # [bs * n_agents, n_enemies * head] -> [bs * n_agents * n_enemies, head, 1]
+ q_attacks = (th.matmul(hh.unsqueeze(1), output_w_attack).squeeze(1) + output_b_attack).view(
+ -1, self.n_heads, 1
+ )
+ q_attack = self.unify_output_heads(q_attacks).view( # [bs * n_agents * n_enemies, 1]
+ bs, self.n_agents, self.n_enemies
+ ) # [bs, n_agents, n_enemies]
+
+ # %%%%%%%%%%%%%%% 'rescue' actions for map_type == "MMM" %%%%%%%%%%%%%%%
+ if self.args.map_type == "MMM":
+ output_w_rescue = self.hyper_output_w_rescue_action(ally_feats_t).view(
+ bs * self.n_agents, self.n_allies, self.rnn_hidden_dim, self.n_heads
+ ).transpose(1, 2).reshape( # -> [bs * n_agents, rnn_hidden_dim, n_allies, n_heads]
+ bs * self.n_agents, self.rnn_hidden_dim, self.n_allies * self.n_heads
+ ) # [bs * n_agents, rnn_hidden_dim, n_allies * heads]
+ # b: [bs * n_agents * n_allies, ally_fea_dim] -> [bs * n_agents * n_allies, 1, n_heads]
+ output_b_rescue = self.hyper_output_b_rescue_action(ally_feats_t).view(
+ bs * self.n_agents, self.n_allies * self.n_heads
+ ) # -> [bs * n_agents, n_allies * head]
+
+ # [bs * n_agents, 1, rnn_hidden_dim] * [bs * n_agents, rnn_hidden_dim, n_allies * head] = [bs * n_agents, 1, n_allies * head]
+ # -> # [bs * n_agents, n_allies * head] -> [bs * n_agents * n_allies, head, 1]
+ q_rescue = (th.matmul(hh.unsqueeze(1), output_w_rescue).squeeze(1) + output_b_rescue).view(
+ -1, self.n_heads, 1)
+ q_rescue = self.unify_rescue_output_heads(q_rescue).view( # [bs * n_agents * n_allies, 1]
+ bs, self.n_agents, self.n_allies
+ ) # [bs, n_agents, n_allies]
+ # The medivac is the last indexed agent, so its rescue-action ids lie in [0, n_allies-1]; pad the remaining attack slots with a large negative value.
+ right_padding = th.ones_like(q_attack[:, -1:, self.n_allies:], requires_grad=False) * (-9999999)
+ modified_q_attack_of_medivac = th.cat([q_rescue[:, -1:, :], right_padding], dim=-1)
+ # Merge
+ q_attack = th.cat([q_attack[:, :-1], modified_q_attack_of_medivac], dim=1)
+
+ # Concat 2 types of Q-values
+ q = th.cat((q_normal, q_attack), dim=-1) # [bs, n_agents, 6 + n_enemies]
+ return q.view(bs, self.n_agents, -1), hh.view(bs, self.n_agents, -1)
diff --git a/src/modules/agents/hpns_rnn_agent.py b/src/modules/agents/hpns_rnn_agent.py
new file mode 100644
index 0000000..0a6a4bc
--- /dev/null
+++ b/src/modules/agents/hpns_rnn_agent.py
@@ -0,0 +1,218 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import math
+from torch.nn.parameter import Parameter
+
+
+def kaiming_uniform_(tensor_w, tensor_b, mode='fan_in', gain=12 ** (-0.5)):
+ fan = nn.init._calculate_correct_fan(tensor_w.data, mode)
+ std = gain / math.sqrt(fan)
+ bound_w = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
+ bound_b = 1 / math.sqrt(fan)
+ with th.no_grad():
+ tensor_w.data.uniform_(-bound_w, bound_w)
+ if tensor_b is not None:
+ tensor_b.data.uniform_(-bound_b, bound_b)
+
+
+class Merger(nn.Module):
+ def __init__(self, head, fea_dim):
+ super(Merger, self).__init__()
+ self.head = head
+ if head > 1:
+ self.weight = Parameter(th.Tensor(1, head, fea_dim).fill_(1.))
+ self.softmax = nn.Softmax(dim=1)
+
+ def forward(self, x):
+ """
+ :param x: [bs, n_head, fea_dim]
+ :return: [bs, fea_dim]
+ """
+ if self.head > 1:
+ return th.sum(self.softmax(self.weight) * x, dim=1, keepdim=False)
+ else:
+ return th.squeeze(x, dim=1)
+
+
+class HPNS_RNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(HPNS_RNNAgent, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_allies = args.n_allies
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+ self.n_heads = args.hpn_head_num
+ self.rnn_hidden_dim = args.rnn_hidden_dim
+
+ # [4 + 1, (6, 5), (4, 5)]
+ self.own_feats_dim, self.enemy_feats_dim, self.ally_feats_dim = input_shape
+ self.enemy_feats_dim = self.enemy_feats_dim[-1] # [n_enemies, feat_dim]
+ self.ally_feats_dim = self.ally_feats_dim[-1] # [n_allies, feat_dim]
+
+ if self.args.obs_agent_id:
+ # embedding table for agent_id
+ self.agent_id_embedding = th.nn.Embedding(self.n_agents, self.rnn_hidden_dim)
+
+ if self.args.obs_last_action:
+ # embedding table for action id
+ self.action_id_embedding = th.nn.Embedding(self.n_actions, self.rnn_hidden_dim)
+
+ # Unique Features (do not need hyper net)
+ self.fc1_own = nn.Linear(self.own_feats_dim, self.rnn_hidden_dim, bias=True) # only one bias is OK
+
+ # Multiple entities (use hyper net to process these features to ensure permutation invariant)
+ self.hyper_enemy = nn.Sequential(
+ nn.Linear(self.enemy_feats_dim, args.hpn_hyper_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hpn_hyper_dim, ((self.enemy_feats_dim + 1) * self.rnn_hidden_dim + 1) * self.n_heads)
+ ) # output shape: (enemy_feats_dim * rnn_hidden_dim + rnn_hidden_dim + 1)
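+ # Layout of the flattened hyper_enemy output (matches the slicing in forward()):
+ #   first enemy_feats_dim * rnn_hidden_dim * n_heads entries -> fc1 weights embedding the enemy features,
+ #   next rnn_hidden_dim * n_heads entries -> fc2 weight columns producing the attack-action Q,
+ #   last n_heads entries -> fc2 biases of the attack-action Q.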
+
+ if self.args.map_type == "MMM":
+ assert self.n_enemies >= self.n_agents, "For MMM maps, n_enemies must be >= n_agents because the 'attack' and 'rescue' actions share the same output ids in SMAC"
+ self.hyper_ally = nn.Sequential(
+ nn.Linear(self.ally_feats_dim, args.hpn_hyper_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hpn_hyper_dim, ((self.ally_feats_dim + 1) * self.rnn_hidden_dim + 1) * self.n_heads)
+ ) # output shape: ally_feats_dim * rnn_hidden_dim + rnn_hidden_dim + 1, for 'rescue actions'
+ self.unify_output_heads_rescue = Merger(self.n_heads, 1)
+ else:
+ self.hyper_ally = nn.Sequential(
+ nn.Linear(self.ally_feats_dim, args.hpn_hyper_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hpn_hyper_dim, self.ally_feats_dim * self.rnn_hidden_dim * self.n_heads)
+ ) # output shape: ally_feats_dim * rnn_hidden_dim
+
+ self.unify_input_heads = Merger(self.n_heads, self.rnn_hidden_dim)
+ self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)
+ self.fc2_normal_actions = nn.Linear(self.rnn_hidden_dim, args.output_normal_actions) # (no_op, stop, up, down, right, left)
+ self.unify_output_heads = Merger(self.n_heads, 1)
+
+ # Reset parameters for hypernets
+ # self._reset_hypernet_parameters(init_type="xavier")
+ # self._reset_hypernet_parameters(init_type="kaiming")
+
+ def _reset_hypernet_parameters(self, init_type='kaiming'):
+ gain = 2 ** (-0.5)
+ # %%%%%%%%%%%%%%%%%%%%%% Hypernet-based API input layer %%%%%%%%%%%%%%%%%%%%
+ for m in self.hyper_enemy.modules():
+ if isinstance(m, nn.Linear):
+ if init_type == "kaiming":
+ kaiming_uniform_(m.weight, m.bias, gain=gain)
+ else:
+ nn.init.xavier_normal_(m.weight.data)
+ m.bias.data.fill_(0.)
+ for m in self.hyper_ally.modules():
+ if isinstance(m, nn.Linear):
+ if init_type == "kaiming":
+ kaiming_uniform_(m.weight, m.bias, gain=gain)
+ else:
+ nn.init.xavier_normal_(m.weight.data)
+ m.bias.data.fill_(0.)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1_own.weight.new(1, self.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state):
+ # [bs, n_agents, mv_fea_dim], [bs * n_agents * n_enemies, enemy_fea_dim], [bs * n_agents * n_allies, ally_fea_dim], [bs, n_agents, own_fea_dim]
+ bs, own_feats_t, enemy_feats_t, ally_feats_t, embedding_indices = inputs
+
+ # (1) Own feature
+ embedding_own = self.fc1_own(own_feats_t) # [bs * n_agents, rnn_hidden_dim]
+
+ # (2) ID embeddings
+ if self.args.obs_agent_id:
+ agent_indices = embedding_indices[0]
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.agent_id_embedding(agent_indices).view(-1, self.rnn_hidden_dim)
+ if self.args.obs_last_action:
+ last_action_indices = embedding_indices[-1]
+ if last_action_indices is not None: # t != 0
+ # [bs * n_agents, rnn_hidden_dim]
+ embedding_own = embedding_own + self.action_id_embedding(last_action_indices).view(
+ -1, self.rnn_hidden_dim)
+
+ # (3) Enemy feature (enemy_feats_dim * rnn_hidden_dim + rnn_hidden_dim + 1)
+ hyper_enemy_out = self.hyper_enemy(enemy_feats_t)
+ fc1_w_enemy = hyper_enemy_out[:, :-(self.rnn_hidden_dim + 1) * self.n_heads].reshape(
+ -1, self.enemy_feats_dim, self.rnn_hidden_dim * self.n_heads
+ ) # [bs * n_agents * n_enemies, enemy_fea_dim, rnn_hidden_dim * n_heads]
+ # [bs * n_agents * n_enemies, 1, enemy_fea_dim] * [bs * n_agents * n_enemies, enemy_fea_dim, rnn_hidden_dim * n_heads] = [bs * n_agents * n_enemies, 1, rnn_hidden_dim * n_heads]
+ embedding_enemies = th.matmul(enemy_feats_t.unsqueeze(1), fc1_w_enemy).view(
+ bs * self.n_agents, self.n_enemies, self.n_heads, self.rnn_hidden_dim
+ ) # [bs * n_agents, n_enemies, n_heads, rnn_hidden_dim]
+ embedding_enemies = embedding_enemies.sum(dim=1, keepdim=False) # [bs * n_agents, n_heads, rnn_hidden_dim]
+
+ # (4) Ally features
+ hyper_ally_out = self.hyper_ally(ally_feats_t)
+ if self.args.map_type == "MMM":
+ # [bs * n_agents * n_allies, ally_fea_dim, rnn_hidden_dim * head]
+ fc1_w_ally = hyper_ally_out[:, :-(self.rnn_hidden_dim + 1) * self.n_heads].reshape(
+ -1, self.ally_feats_dim, self.rnn_hidden_dim * self.n_heads
+ )
+ else:
+ # [bs * n_agents * n_allies, ally_fea_dim, rnn_hidden_dim * head]
+ fc1_w_ally = hyper_ally_out.view(-1, self.ally_feats_dim, self.rnn_hidden_dim * self.n_heads)
+ # [bs * n_agents * n_allies, 1, ally_fea_dim] * [bs * n_agents * n_allies, ally_fea_dim, n_heads* rnn_hidden_dim] = [bs * n_agents * n_allies, 1, n_heads*rnn_hidden_dim]
+ embedding_allies = th.matmul(ally_feats_t.unsqueeze(1), fc1_w_ally).view(
+ bs * self.n_agents, self.n_allies, self.n_heads, self.rnn_hidden_dim
+ ) # [bs * n_agents, n_allies, head, rnn_hidden_dim]
+ embedding_allies = embedding_allies.sum(dim=1, keepdim=False) # [bs * n_agents, head, rnn_hidden_dim]
+
+ # Final embedding
+ embedding = embedding_own + self.unify_input_heads(
+ embedding_enemies + embedding_allies
+ ) # [bs * n_agents, rnn_hidden_dim]
+
+ x = F.relu(embedding, inplace=True)
+ h_in = hidden_state.reshape(-1, self.rnn_hidden_dim)
+ hh = self.rnn(x, h_in) # [bs * n_agents, rnn_hidden_dim]
+
+ # Q-values of normal actions
+ q_normal = self.fc2_normal_actions(hh).view(bs, self.n_agents, -1) # [bs, n_agents, 6]
+
+ # Q-values of attack actions: [bs * n_agents * n_enemies, rnn_hidden_dim * n_heads]
+ fc2_w_attack = hyper_enemy_out[:, -(self.rnn_hidden_dim + 1) * self.n_heads: -self.n_heads].reshape(
+ bs * self.n_agents, self.n_enemies, self.rnn_hidden_dim, self.n_heads
+ ).transpose(1, 2).reshape( # -> [bs * n_agents, rnn_hidden_dim, n_enemies, n_heads]
+ bs * self.n_agents, self.rnn_hidden_dim, self.n_enemies * self.n_heads
+ ) # [bs * n_agents, rnn_hidden_dim, n_enemies * heads]
+ fc2_b_attack = hyper_enemy_out[:, -self.n_heads:].reshape(bs * self.n_agents, self.n_enemies * self.n_heads)
+
+ # [bs*n_agents, 1, rnn_hidden_dim] * [bs*n_agents, rnn_hidden_dim, n_enemies*head] -> [bs*n_agents, 1, n_enemies*head]
+ q_attacks = (th.matmul(hh.unsqueeze(1), fc2_w_attack).squeeze(1) + fc2_b_attack).view(
+ bs * self.n_agents * self.n_enemies, self.n_heads, 1
+ ) # [bs * n_agents, n_enemies*head] -> [bs * n_agents * n_enemies, head, 1]
+
+ # Merge multiple heads into one.
+ q_attack = self.unify_output_heads(q_attacks).view( # [bs * n_agents * n_enemies, 1]
+ bs, self.n_agents, self.n_enemies
+ ) # [bs, n_agents, n_enemies]
+
+ # %%%%%%%%%%%%%%% 'rescue' actions for map_type == "MMM" %%%%%%%%%%%%%%%
+ if self.args.map_type == "MMM":
+ fc2_w_rescue = hyper_ally_out[:, -(self.rnn_hidden_dim + 1) * self.n_heads: -self.n_heads].reshape(
+ bs * self.n_agents, self.n_allies, self.rnn_hidden_dim, self.n_heads
+ ).transpose(1, 2).reshape( # -> [bs * n_agents, rnn_hidden_dim, n_allies, n_heads]
+ bs * self.n_agents, self.rnn_hidden_dim, self.n_allies * self.n_heads
+ ) # [bs * n_agents, rnn_hidden_dim, n_allies * heads]
+ fc2_b_rescue = hyper_ally_out[:, -self.n_heads:].reshape(bs * self.n_agents, self.n_allies * self.n_heads)
+ # [bs*n_agents, 1, rnn_hidden_dim] * [bs*n_agents, rnn_hidden_dim, n_allies*head] -> [bs*n_agents, 1, n_allies*head]
+ q_rescues = (th.matmul(hh.unsqueeze(1), fc2_w_rescue).squeeze(1) + fc2_b_rescue).view(
+ bs * self.n_agents * self.n_allies, self.n_heads, 1
+ ) # [bs * n_agents, n_allies*head] -> [bs * n_agents * n_allies, head, 1]
+ # Merge multiple heads into one.
+ q_rescue = self.unify_output_heads_rescue(q_rescues).view( # [bs * n_agents * n_allies, 1]
+ bs, self.n_agents, self.n_allies
+ ) # [bs, n_agents, n_allies]
+
+ # The medivac is the last indexed agent, so its rescue-action ids lie in [0, n_allies-1]; pad the remaining attack slots with a large negative value.
+ right_padding = th.ones_like(q_attack[:, -1:, self.n_allies:], requires_grad=False) * (-9999999)
+ modified_q_attack_of_medivac = th.cat([q_rescue[:, -1:, :], right_padding], dim=-1)
+ q_attack = th.cat([q_attack[:, :-1], modified_q_attack_of_medivac], dim=1)
+
+ # Concat 2 types of Q-values
+ q = th.cat((q_normal, q_attack), dim=-1) # [bs, n_agents, 6 + n_enemies]
+ return q.view(bs, self.n_agents, -1), hh.view(bs, self.n_agents, -1) # [bs, n_agents, 6 + n_enemies]
diff --git a/src/modules/agents/n_rnn_agent.py b/src/modules/agents/n_rnn_agent.py
new file mode 100644
index 0000000..ae67686
--- /dev/null
+++ b/src/modules/agents/n_rnn_agent.py
@@ -0,0 +1,46 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torch as th
+import numpy as np
+import torch.nn.init as init
+from utils.th_utils import orthogonal_init_
+from torch.nn import LayerNorm
+
+
+class NRNNAgent(nn.Module):
+ """
+ n_rnn 30.412K for 5m_vs_6m
+ """
+ def __init__(self, input_shape, args):
+ super(NRNNAgent, self).__init__()
+ self.args = args
+
+ self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
+ self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
+ self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
+
+ if getattr(args, "use_layer_norm", False):
+ self.layer_norm = LayerNorm(args.rnn_hidden_dim)
+
+ if getattr(args, "use_orthogonal", False):
+ orthogonal_init_(self.fc1)
+ orthogonal_init_(self.fc2, gain=args.gain)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state):
+ b, a, e = inputs.size()
+
+ inputs = inputs.view(-1, e)
+ x = F.relu(self.fc1(inputs), inplace=True)
+ h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
+ hh = self.rnn(x, h_in)
+
+ if getattr(self.args, "use_layer_norm", False):
+ q = self.fc2(self.layer_norm(hh))
+ else:
+ q = self.fc2(hh)
+
+ return q.view(b, a, -1), hh.view(b, a, -1)
\ No newline at end of file
diff --git a/src/modules/agents/rnn_agent.py b/src/modules/agents/rnn_agent.py
new file mode 100644
index 0000000..1c0c1ab
--- /dev/null
+++ b/src/modules/agents/rnn_agent.py
@@ -0,0 +1,26 @@
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class RNNAgent(nn.Module):
+ def __init__(self, input_shape, args):
+ super(RNNAgent, self).__init__()
+ self.args = args
+
+ self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
+ self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
+ self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
+
+ def forward(self, inputs, hidden_state=None):
+ b, a, e = inputs.size()
+
+ x = F.relu(self.fc1(inputs.view(-1, e)), inplace=True)
+ if hidden_state is not None:
+ hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
+ h = self.rnn(x, hidden_state)
+ q = self.fc2(h)
+ return q.view(b, a, -1), h.view(b, a, -1)
\ No newline at end of file
diff --git a/src/modules/agents/updet_agent.py b/src/modules/agents/updet_agent.py
new file mode 100644
index 0000000..959cde0
--- /dev/null
+++ b/src/modules/agents/updet_agent.py
@@ -0,0 +1,205 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torch
+import argparse
+import torch as th
+
+
+class UPDeT(nn.Module):
+ def __init__(self, input_shape, args):
+ super(UPDeT, self).__init__()
+ self.args = args
+ self.input_shape = input_shape # (5, (6, 5), (4, 5)) for 5m vs 6m
+ self.n_agents = args.n_agents
+
+ self.transformer = Transformer(input_shapes=input_shape, emb=args.transformer_embed_dim,
+ heads=args.transformer_heads, depth=args.transformer_depth,
+ output_dim=args.transformer_embed_dim)
+ self.q_basic = nn.Linear(args.transformer_embed_dim, 6)
+
+ def init_hidden(self):
+ # make hidden states on same device as model
+ return self.q_basic.weight.new(1, self.args.transformer_embed_dim).zero_()
+
+ def forward(self, inputs, hidden_state):
+ # [bs * n_agents, 1, transformer_embed_dim]
+ hidden_state = hidden_state.reshape(-1, 1, self.args.transformer_embed_dim)
+
+ # transformer-out: torch.Size([b * n_agents, 1+n_enemies+(n_agents-1)+1, transformer_embed_dim])
+ # in dim 1: self_fea_att_value, m enemy_fea_att_value, n-1 ally_fea_att_value, hidden_state
+ outputs, _ = self.transformer.forward(
+ inputs, hidden_state, None)
+
+ # the first token's output gives the Q-values of the 6 basic actions (no_op, stop, up, down, left, right)
+ q_basic_actions = self.q_basic(outputs[:, 0, :])
+
+ # last dim for hidden state
+ h = outputs[:, -1:, :]
+
+ # Replace the loop with batch computing
+ # q_enemies_list = []
+ # # each enemy has an output Q
+ # for i in range(self.args.n_enemies):
+ # q_enemy = self.q_basic(outputs[:, 1 + i, :])
+ # q_enemy_mean = torch.mean(q_enemy, 1, True)
+ # q_enemies_list.append(q_enemy_mean)
+ # # concat enemy Q over all enemies
+ # q_enemies = torch.stack(q_enemies_list, dim=1).squeeze()
+
+ q_enemies = self.q_basic(
+ outputs[:, 1: 1 + self.args.n_enemies, :]) # [bs * n_agents, n_enemies, 32]->[bs * n_agents, n_enemies, 6]
+ q_enemies = q_enemies.mean(dim=-1, keepdim=False) # average the 6 outputs of each enemy token to obtain its attack-action Q
+
+ # concat basic action Q with enemy attack Q
+ q = torch.cat((q_basic_actions, q_enemies), 1)
+
+ return q, h # [bs * n_agents, 6 + n_enemies], this shape will be reshaped to [bs, n_agents, 6 + n_enemies] in forward() of the BasicMAC
+
+
+class SelfAttention(nn.Module):
+ def __init__(self, emb, heads=8, mask=False):
+ super(SelfAttention, self).__init__()
+
+ self.emb = emb
+ self.heads = heads
+ self.mask = mask
+
+ self.tokeys = nn.Linear(emb, emb * heads, bias=False)
+ self.toqueries = nn.Linear(emb, emb * heads, bias=False)
+ self.tovalues = nn.Linear(emb, emb * heads, bias=False)
+
+ self.unifyheads = nn.Linear(heads * emb, emb)
+
+ def forward(self, x, mask):
+
+ b, t, e = x.size()
+ h = self.heads
+ keys = self.tokeys(x).view(b, t, h, e)
+ queries = self.toqueries(x).view(b, t, h, e)
+ values = self.tovalues(x).view(b, t, h, e)
+
+ # compute scaled dot-product self-attention
+
+ # - fold heads into the batch dimension
+ keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
+ queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
+ values = values.transpose(1, 2).contiguous().view(b * h, t, e)
+
+ queries = queries / (e ** (1 / 4))
+ keys = keys / (e ** (1 / 4))
+ # - Instead of dividing the dot products by sqrt(e), we scale the keys and values.
+ # This should be more memory efficient
+
+ # - get dot product of queries and keys, and scale
+ dot = torch.bmm(queries, keys.transpose(1, 2))
+
+ assert dot.size() == (b * h, t, t)
+
+ if self.mask: # mask out the upper half of the dot matrix, excluding the diagonal
+ mask_(dot, maskval=float('-inf'), mask_diagonal=False)
+
+ if mask is not None:
+ dot = dot.masked_fill(mask == 0, -1e9)
+
+ dot = F.softmax(dot, dim=2)
+ # - dot now has row-wise self-attention probabilities
+
+ # apply the self attention to the values
+ out = torch.bmm(dot, values).view(b, h, t, e)
+
+ # swap h, t back, unify heads
+ out = out.transpose(1, 2).contiguous().view(b, t, h * e)
+
+ return self.unifyheads(out) # [b, n_entities, e]
+
+
+class TransformerBlock(nn.Module):
+
+ def __init__(self, emb, heads, mask, ff_hidden_mult=4, dropout=0.0):
+ super(TransformerBlock, self).__init__()
+ self.attention = SelfAttention(emb, heads=heads, mask=mask)
+ self.mask = mask
+ self.norm1 = nn.LayerNorm(emb)
+ self.norm2 = nn.LayerNorm(emb)
+ self.ff = nn.Sequential(
+ nn.Linear(emb, ff_hidden_mult * emb),
+ nn.ReLU(),
+ nn.Linear(ff_hidden_mult * emb, emb)
+ )
+ self.do = nn.Dropout(dropout)
+
+ def forward(self, x_mask):
+ x, mask = x_mask
+ attended = self.attention(x, mask)
+ x = self.norm1(attended + x)
+ x = self.do(x)
+ fedforward = self.ff(x)
+ x = self.norm2(fedforward + x)
+ x = self.do(x)
+ return x, mask
+
+
+class Transformer(nn.Module):
+
+ def __init__(self, input_shapes, emb, heads, depth, output_dim):
+ super(Transformer, self).__init__()
+ self.num_tokens = output_dim
+
+ self.input_shapes = input_shapes # (5, (6, 5), (4, 5)) for 5m vs 6m
+ # use the max_dim to init the token layer (to support all maps)
+ token_dim = max([input_shapes[0], input_shapes[1][-1], input_shapes[2][-1]])
+ self.token_embedding = nn.Linear(token_dim, emb)
+
+ tblocks = []
+ for i in range(depth):
+ tblocks.append(TransformerBlock(emb=emb, heads=heads, mask=False))
+ self.tblocks = nn.Sequential(*tblocks)
+ self.toprobs = nn.Linear(emb, output_dim)
+
+ def forward(self, inputs, h, mask):
+ """
+
+ :param inputs: cat([(bs * n_agents, 1, -1), (bs * n_agents, n_enemies, -1), (bs * n_agents, (n_agents-1), -1)], dim=1)
+ :param h:
+ :param mask:
+ :return:
+ """
+ tokens = self.token_embedding(inputs) # (bs * n_agents, 1 + n_enemies + (n_agents-1), emb)
+
+ # Append hidden state to the end
+ tokens = torch.cat((tokens, h), 1) # tokens+h: torch.Size([5, 12, 32])
+
+ b, t, e = tokens.size()
+
+ x, mask = self.tblocks((tokens, mask))
+ # print("transformer-out:", x.shape) # transformer-out: torch.Size([5, 12, 32])
+
+ x = self.toprobs(x.view(b * t, e)).view(b, t, self.num_tokens) # torch.Size([5, 12, 32])
+ return x, tokens
+
+
+def mask_(matrices, maskval=0.0, mask_diagonal=True):
+ b, h, w = matrices.size()
+ indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
+ matrices[:, indices[0], indices[1]] = maskval
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Unit Testing')
+ parser.add_argument('--token_dim', default=5, type=int)
+ parser.add_argument('--emb', default=32, type=int)
+ parser.add_argument('--heads', default=3, type=int)
+ parser.add_argument('--depth', default=2, type=int)
+ parser.add_argument('--ally_num', default=5, type=int)
+ parser.add_argument('--enemy_num', default=5, type=int)
+ parser.add_argument('--episode', default=20, type=int)
+ args = parser.parse_args()
+
+ # Map the test arguments onto the attribute names used by UPDeT/Transformer above.
+ args.n_agents = args.ally_num
+ args.n_enemies = args.enemy_num
+ args.transformer_embed_dim = args.emb
+ args.transformer_heads = args.heads
+ args.transformer_depth = args.depth
+ # (own_dim, (n_enemies, enemy_dim), (n_allies, ally_dim)); token_dim is used for every entity here.
+ input_shape = (args.token_dim, (args.enemy_num, args.token_dim), (args.ally_num - 1, args.token_dim))
+
+ # testing the agent: one own token + enemy tokens + (ally_num - 1) ally tokens per agent
+ agent = UPDeT(input_shape, args).cuda()
+ hidden_state = agent.init_hidden().cuda().expand(args.ally_num, 1, -1)
+ tensor = torch.rand(args.ally_num, 1 + args.enemy_num + (args.ally_num - 1), args.token_dim).cuda()
+ q_list = []
+ for _ in range(args.episode):
+ q, hidden_state = agent.forward(tensor, hidden_state)
+ q_list.append(q)
diff --git a/src/modules/critics/__init__.py b/src/modules/critics/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/modules/critics/centralv.py b/src/modules/critics/centralv.py
new file mode 100644
index 0000000..71e580e
--- /dev/null
+++ b/src/modules/critics/centralv.py
@@ -0,0 +1,36 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class CentralVCritic(nn.Module):
+ def __init__(self, scheme, args):
+ super(CentralVCritic, self).__init__()
+
+ self.args = args
+ self.n_actions = args.n_actions
+ self.n_agents = args.n_agents
+
+ input_shape = self._get_input_shape(scheme)
+ self.output_type = "v"
+
+ # Set up network layers
+ self.fc1 = nn.Sequential(nn.Linear(input_shape, 256),
+ nn.ReLU(inplace=True),
+ nn.Linear(256, 256),
+ nn.ReLU(inplace=True),
+ nn.Linear(256, 1)
+ )
+
+ def forward(self, batch, t=None):
+ inputs = self._build_inputs(batch, t=t)
+ q = self.fc1(inputs)
+ return q
+
+ def _build_inputs(self, batch, t=None):
+ ts = slice(None) if t is None else slice(t, t+1)
+ return batch["state"][:, ts]
+
+ def _get_input_shape(self, scheme):
+ input_shape = scheme["state"]["vshape"]
+ return input_shape
\ No newline at end of file
diff --git a/src/modules/critics/coma.py b/src/modules/critics/coma.py
new file mode 100644
index 0000000..ddc9ab2
--- /dev/null
+++ b/src/modules/critics/coma.py
@@ -0,0 +1,70 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class COMACritic(nn.Module):
+ def __init__(self, scheme, args):
+ super(COMACritic, self).__init__()
+
+ self.args = args
+ self.n_actions = args.n_actions
+ self.n_agents = args.n_agents
+
+ input_shape = self._get_input_shape(scheme)
+ self.output_type = "q"
+
+ # Set up network layers
+ self.fc1 = nn.Linear(input_shape, 128)
+ self.fc2 = nn.Linear(128, 128)
+ self.fc3 = nn.Linear(128, self.n_actions)
+
+ def forward(self, batch, t=None):
+ inputs = self._build_inputs(batch, t=t)
+ x = F.relu(self.fc1(inputs))
+ x = F.relu(self.fc2(x))
+ q = self.fc3(x)
+ return q
+
+ def _build_inputs(self, batch, t=None):
+ bs = batch.batch_size
+ max_t = batch.max_seq_length if t is None else 1
+ ts = slice(None) if t is None else slice(t, t+1)
+ inputs = []
+ # state
+ inputs.append(batch["state"][:, ts].unsqueeze(2).repeat(1, 1, self.n_agents, 1))
+
+ # observation
+ inputs.append(batch["obs"][:, ts])
+
+ # actions (masked out by agent)
+ actions = batch["actions_onehot"][:, ts].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
+ agent_mask = (1 - th.eye(self.n_agents, device=batch.device))
+ agent_mask = agent_mask.view(-1, 1).repeat(1, self.n_actions).view(self.n_agents, -1)
+ inputs.append(actions * agent_mask.unsqueeze(0).unsqueeze(0))
+
+ # last actions
+ if t == 0:
+ inputs.append(th.zeros_like(batch["actions_onehot"][:, 0:1]).view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))
+ elif isinstance(t, int):
+ inputs.append(batch["actions_onehot"][:, slice(t-1, t)].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))
+ else:
+ last_actions = th.cat([th.zeros_like(batch["actions_onehot"][:, 0:1]), batch["actions_onehot"][:, :-1]], dim=1)
+ last_actions = last_actions.view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
+ inputs.append(last_actions)
+
+ inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
+
+ inputs = th.cat([x.reshape(bs, max_t, self.n_agents, -1) for x in inputs], dim=-1)
+ return inputs
+
+ def _get_input_shape(self, scheme):
+ # state
+ input_shape = scheme["state"]["vshape"]
+ # observation
+ input_shape += scheme["obs"]["vshape"]
+ # actions and last actions
+ input_shape += scheme["actions_onehot"]["vshape"][0] * self.n_agents * 2
+ # agent id
+ input_shape += self.n_agents
+ return input_shape
\ No newline at end of file
diff --git a/src/modules/critics/fmac_critic.py b/src/modules/critics/fmac_critic.py
new file mode 100644
index 0000000..df70c07
--- /dev/null
+++ b/src/modules/critics/fmac_critic.py
@@ -0,0 +1,56 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+from modules.layer.self_atten import SelfAttention
+
+
+class FMACCritic(nn.Module):
+ def __init__(self, scheme, args):
+ super(FMACCritic, self).__init__()
+ self.args = args
+ self.n_actions = args.n_actions
+ self.n_agents = args.n_agents
+ self.input_shape = self._get_input_shape(scheme)
+ self.output_type = "q"
+ self.hidden_states = None
+ self.critic_hidden_dim = args.critic_hidden_dim
+
+ # Set up network layers
+ self.fc1 = nn.Linear(self.input_shape + self.n_actions, self.critic_hidden_dim)
+ self.fc2 = nn.Linear(self.critic_hidden_dim, self.critic_hidden_dim)
+ self.fc3 = nn.Linear(self.critic_hidden_dim, 1)
+
+ def forward(self, inputs, actions, hidden_state=None):
+ bs = inputs.batch_size
+ ts = inputs.max_seq_length
+
+ inputs = self._build_inputs(inputs)
+ inputs = th.cat([inputs, actions], dim=-1)
+ x = F.relu(self.fc1(inputs), inplace=True)
+ x = F.relu(self.fc2(x), inplace=True)
+ q1 = self.fc3(x)
+
+ return q1, hidden_state
+
+ def _build_inputs(self, batch):
+ # Assumes homogenous agents with flat observations.
+ # Other MACs might want to e.g. delegate building inputs to each agent
+ bs = batch.batch_size
+ ts = batch.max_seq_length
+ inputs = []
+ inputs.append(batch["obs"]) # b1av
+ # inputs.append(batch["state"].unsqueeze(2).repeat(1, 1, self.n_agents, 1)) # b1av
+ if self.args.obs_agent_id:
+ inputs.append(th.eye(self.n_agents, device=batch.device)\
+ .unsqueeze(0).unsqueeze(0).expand(bs, ts, -1, -1))
+ inputs = th.cat([x.reshape(bs, ts, self.n_agents, -1) for x in inputs], dim=-1)
+ return inputs
+
+ def _get_input_shape(self, scheme):
+ input_shape = scheme["obs"]["vshape"]
+ # input_shape += scheme["state"]["vshape"]
+ if self.args.obs_agent_id:
+ input_shape += self.n_agents
+
+ return input_shape
\ No newline at end of file
diff --git a/src/modules/critics/lica.py b/src/modules/critics/lica.py
new file mode 100644
index 0000000..d814c1e
--- /dev/null
+++ b/src/modules/critics/lica.py
@@ -0,0 +1,57 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+
+class LICACritic(nn.Module):
+ def __init__(self, scheme, args):
+ super(LICACritic, self).__init__()
+
+ self.args = args
+ self.n_actions = args.n_actions
+ self.n_agents = args.n_agents
+
+ self.output_type = "q"
+
+ # Set up network layers
+ self.state_dim = int(np.prod(args.state_shape))
+ self.weight_dim = args.lica_mixing_embed_dim * self.n_agents * self.n_actions
+ self.hid_dim = args.hypernet_embed_dim
+
+ self.hyper_w_1 = nn.Sequential(nn.Linear(self.state_dim, self.hid_dim),
+ nn.ReLU(),
+ nn.Linear(self.hid_dim, self.weight_dim))
+ self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, self.hid_dim),
+ nn.ReLU(),
+ nn.Linear(self.hid_dim, args.lica_mixing_embed_dim))
+
+ # State dependent bias for hidden layer
+ self.hyper_b_1 = nn.Linear(self.state_dim, args.lica_mixing_embed_dim)
+
+ self.hyper_b_2 = nn.Sequential(nn.Linear(self.state_dim, self.hid_dim),
+ nn.ReLU(),
+ nn.Linear(self.hid_dim, 1))
+
+ def forward(self, act, states):
+ bs = states.size(0)
+ states = states.reshape(-1, self.state_dim)
+ action_probs = act.reshape(-1, 1, self.n_agents * self.n_actions)
+
+ # first layer
+ w1 = self.hyper_w_1(states)
+ b1 = self.hyper_b_1(states)
+ w1 = w1.view(-1, self.n_agents * self.n_actions, self.args.lica_mixing_embed_dim)
+ b1 = b1.view(-1, 1, self.args.lica_mixing_embed_dim)
+
+ h = th.relu(th.bmm(action_probs, w1) + b1)
+
+ # second layer
+ w_final = self.hyper_w_final(states)
+ w_final = w_final.view(-1, self.args.lica_mixing_embed_dim, 1)
+ b2 = self.hyper_b_2(states).view(-1, 1, 1)
+
+ q = th.bmm(h, w_final) + b2
+ q = q.view(bs, -1, 1)
+
+ return q
\ No newline at end of file
diff --git a/src/modules/critics/offpg.py b/src/modules/critics/offpg.py
new file mode 100644
index 0000000..73bbf81
--- /dev/null
+++ b/src/modules/critics/offpg.py
@@ -0,0 +1,65 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class OffPGCritic(nn.Module):
+ def __init__(self, scheme, args):
+ super(OffPGCritic, self).__init__()
+
+ self.args = args
+ self.n_actions = args.n_actions
+ self.n_agents = args.n_agents
+
+ input_shape = self._get_input_shape(scheme)
+ self.output_type = "q"
+
+ # Set up network layers
+ self.fc1 = nn.Linear(input_shape, 256)
+ self.fc2 = nn.Linear(256, 256)
+ self.fc_v = nn.Linear(256, 1)
+ self.fc3 = nn.Linear(256, self.n_actions)
+
+ def forward(self, inputs):
+ x = F.relu(self.fc1(inputs))
+ x = F.relu(self.fc2(x))
+ v = self.fc_v(x)
+ a = self.fc3(x)
+ q = a + v
+ return q
+
+ def _build_inputs(self, batch, bs, max_t):
+ inputs = []
+ # state, obs, action
+ inputs.append(batch["state"][:].unsqueeze(2).repeat(1, 1, self.n_agents, 1))
+ inputs.append(batch["obs"][:])
+ #actions = batch["actions_onehot"][:].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
+ #agent_mask = (1 - th.eye(self.n_agents, device=batch.device))
+ #agent_mask = agent_mask.view(-1, 1).repeat(1, self.n_actions).view(self.n_agents, -1)
+ #inputs.append(actions * agent_mask.unsqueeze(0).unsqueeze(0))
+ # last actions
+ #if self.args.obs_last_action:
+ # last_action = []
+ # last_action.append(actions[:, 0:1].squeeze(2))
+ # last_action.append(actions[:, :-1].squeeze(2))
+ # last_action = th.cat([x for x in last_action], dim = 1)
+ # inputs.append(last_action)
+ #agent id
+ inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
+ inputs = th.cat([x.reshape(bs, max_t, self.n_agents, -1) for x in inputs], dim=-1)
+ return inputs
+
+
+
+ def _get_input_shape(self, scheme):
+ # state
+ input_shape = scheme["state"]["vshape"]
+ # observation
+ input_shape += scheme["obs"]["vshape"]
+ # actions and last actions
+ #input_shape += scheme["actions_onehot"]["vshape"][0] * self.n_agents
+ #if self.args.obs_last_action:
+ # input_shape += scheme["actions_onehot"]["vshape"][0] * self.n_agents
+ # agent id
+ input_shape += self.n_agents
+ return input_shape
diff --git a/src/modules/layer/__init__.py b/src/modules/layer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/modules/layer/self_atten.py b/src/modules/layer/self_atten.py
new file mode 100644
index 0000000..5d58f87
--- /dev/null
+++ b/src/modules/layer/self_atten.py
@@ -0,0 +1,43 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class SelfAttention(nn.Module):
+ def __init__(self, input_size, heads, embed_size):
+ super().__init__()
+ self.input_size = input_size
+ self.heads = heads
+ self.emb_size = embed_size
+
+ self.tokeys = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
+ self.toqueries = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
+ self.tovalues = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
+
+ def forward(self, x):
+ b, t, hin = x.size()
+ assert hin == self.input_size, 'Input size {} should match {}'.format(hin, self.input_size)
+
+ h = self.heads
+ e = self.emb_size
+
+ keys = self.tokeys(x).view(b, t, h, e)
+ queries = self.toqueries(x).view(b, t, h, e)
+ values = self.tovalues(x).view(b, t, h, e)
+
+ # dot-product attention
+ # folding heads to batch dimensions
+ keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
+ queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
+ values = values.transpose(1, 2).contiguous().view(b * h, t, e)
+
+ queries = queries / (e ** (1/4))
+ keys = keys / (e ** (1/4))
+
+ dot = torch.bmm(queries, keys.transpose(1, 2))
+ assert dot.size() == (b*h, t, t)
+
+ # row wise self attention probabilities
+ dot = F.softmax(dot, dim=2)
+ out = torch.bmm(dot, values).view(b, h, t, e)
+ out = out.transpose(1, 2).contiguous().view(b, t, h * e)
+ return out
\ No newline at end of file
diff --git a/src/modules/mixers/__init__.py b/src/modules/mixers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/modules/mixers/api_qmix.py b/src/modules/mixers/api_qmix.py
new file mode 100644
index 0000000..927485f
--- /dev/null
+++ b/src/modules/mixers/api_qmix.py
@@ -0,0 +1,74 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from utils.th_utils import orthogonal_init_
+from torch.nn import LayerNorm
+
+class Mixer(nn.Module):
+ def __init__(self, args, abs=True):
+ super(Mixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.embed_dim = args.mixing_embed_dim
+ self.input_dim = self.state_dim = int(np.prod(args.state_shape))
+
+ self.abs = abs # monotonicity constraint
+ self.qmix_pos_func = getattr(self.args, "qmix_pos_func", "abs")
+ assert self.qmix_pos_func == "abs"
+
+ # hyper w1 b1
+ self.hyper_w1 = nn.Sequential(nn.Linear(self.input_dim, args.hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hypernet_embed, self.n_agents * self.embed_dim))
+ self.hyper_b1 = nn.Sequential(nn.Linear(self.input_dim, self.embed_dim))
+
+ # hyper w2 b2
+ self.hyper_w2 = nn.Sequential(nn.Linear(self.input_dim, args.hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hypernet_embed, self.embed_dim))
+ self.hyper_b2 = nn.Sequential(nn.Linear(self.input_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+
+ if getattr(args, "use_orthogonal", False):
+ raise NotImplementedError
+ for m in self.modules():
+ orthogonal_init_(m)
+
+ def forward(self, qvals, states):
+ # reshape
+ b, t, _ = qvals.size()
+
+ qvals = qvals.reshape(b * t, 1, self.n_agents)
+ states = states.reshape(-1, self.state_dim)
+
+ # First layer
+ w1 = self.hyper_w1(states).view(-1, self.n_agents, self.embed_dim) # b * t, n_agents, emb
+ b1 = self.hyper_b1(states).view(-1, 1, self.embed_dim)
+
+ # Second layer
+ w2 = self.hyper_w2(states).view(-1, self.embed_dim, 1) # b * t, emb, 1
+ b2 = self.hyper_b2(states).view(-1, 1, 1)
+
+ if self.abs:
+ w1 = self.pos_func(w1)
+ w2 = self.pos_func(w2)
+ # print(w1.mean(), w1.var())
+ # print(w2.mean(), w2.var())
+
+ # Forward
+ hidden = F.elu(th.matmul(qvals, w1) + b1) # b * t, 1, emb
+ y = th.matmul(hidden, w2) + b2 # b * t, 1, 1
+
+ return y.view(b, t, -1)
+
+ def pos_func(self, x):
+ if self.qmix_pos_func == "softplus":
+ return th.nn.Softplus(beta=self.args.qmix_pos_func_beta)(x)
+ elif self.qmix_pos_func == "quadratic":
+ return 0.5 * x ** 2
+ else:
+ return th.abs(x)
+
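+
+if __name__ == "__main__":
+ # Minimal smoke test with illustrative hyper-parameters (not taken from any config file).
+ from types import SimpleNamespace
+ _args = SimpleNamespace(n_agents=5, mixing_embed_dim=32, hypernet_embed=64, state_shape=(48,))
+ _mixer = Mixer(_args)
+ _q_tot = _mixer(th.randn(8, 60, 5), th.randn(8, 60, 48)) # [batch, time, n_agents], [batch, time, state_dim]
+ print(_q_tot.shape) # expected: torch.Size([8, 60, 1])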
diff --git a/src/modules/mixers/dmaq_general.py b/src/modules/mixers/dmaq_general.py
new file mode 100644
index 0000000..0aa1357
--- /dev/null
+++ b/src/modules/mixers/dmaq_general.py
@@ -0,0 +1,92 @@
+# From https://github.com/wjh720/QPLEX/, added here for convenience.
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from .dmaq_si_weight import DMAQ_SI_Weight
+
+
+class DMAQer(nn.Module):
+ def __init__(self, args):
+ super(DMAQer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_actions = args.n_actions
+ self.state_dim = int(np.prod(args.state_shape))
+ self.action_dim = args.n_agents * self.n_actions
+ self.state_action_dim = self.state_dim + self.action_dim + 1
+
+ self.embed_dim = args.mixing_embed_dim
+
+ hypernet_embed = self.args.hypernet_embed
+ self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(hypernet_embed, self.n_agents))
+ self.V = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(hypernet_embed, self.n_agents))
+
+ self.si_weight = DMAQ_SI_Weight(args)
+
+ def calc_v(self, agent_qs):
+ agent_qs = agent_qs.view(-1, self.n_agents)
+ v_tot = th.sum(agent_qs, dim=-1)
+ return v_tot
+
+ def calc_adv(self, agent_qs, states, actions, max_q_i):
+ states = states.reshape(-1, self.state_dim)
+ actions = actions.reshape(-1, self.action_dim)
+ agent_qs = agent_qs.view(-1, self.n_agents)
+ max_q_i = max_q_i.view(-1, self.n_agents)
+
+ adv_q = (agent_qs - max_q_i).view(-1, self.n_agents).detach()
+
+ adv_w_final = self.si_weight(states, actions)
+ adv_w_final = adv_w_final.view(-1, self.n_agents)
+
+ if self.args.is_minus_one:
+ adv_tot = th.sum(adv_q * (adv_w_final - 1.), dim=1)
+ else:
+ adv_tot = th.sum(adv_q * adv_w_final, dim=1)
+ return adv_tot
+
+ def calc(self, agent_qs, states, actions=None, max_q_i=None, is_v=False):
+ if is_v:
+ v_tot = self.calc_v(agent_qs)
+ return v_tot
+ else:
+ adv_tot = self.calc_adv(agent_qs, states, actions, max_q_i)
+ return adv_tot
+
+ def forward(self, agent_qs, states, actions=None, max_q_i=None, is_v=False):
+ """
+ :param agent_qs: selected individual Q(s,a)
+ :param states:
+ :param actions:
+ :param max_q_i:
+ :param is_v:
+ :return:
+ """
+ bs = agent_qs.size(0)
+ states = states.reshape(-1, self.state_dim)
+ agent_qs = agent_qs.view(-1, self.n_agents)
+
+ w_final = self.hyper_w_final(states)
+ w_final = th.abs(w_final)
+ w_final = w_final.view(-1, self.n_agents) + 1e-10
+ v = self.V(states)
+ v = v.view(-1, self.n_agents)
+
+ if self.args.weighted_head:
+ agent_qs = w_final * agent_qs + v
+
+ if not is_v:
+ max_q_i = max_q_i.view(-1, self.n_agents)
+ if self.args.weighted_head:
+ max_q_i = w_final * max_q_i + v
+
+ y = self.calc(agent_qs, states, actions=actions, max_q_i=max_q_i, is_v=is_v)
+ v_tot = y.view(bs, -1, 1)
+
+ return v_tot
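+# Note: with is_v=True the mixer returns the sum of the (optionally re-weighted, see weighted_head) agent
+# Q-values, i.e. the V(s) term of QPLEX; with is_v=False it returns the weighted advantage term from calc_adv().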
diff --git a/src/modules/mixers/dmaq_si_weight.py b/src/modules/mixers/dmaq_si_weight.py
new file mode 100644
index 0000000..da11a38
--- /dev/null
+++ b/src/modules/mixers/dmaq_si_weight.py
@@ -0,0 +1,81 @@
+# From https://github.com/wjh720/QPLEX/, added here for convenience.
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+
+class DMAQ_SI_Weight(nn.Module):
+ def __init__(self, args):
+ super(DMAQ_SI_Weight, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_actions = args.n_actions
+ self.state_dim = int(np.prod(args.state_shape))
+ self.action_dim = args.n_agents * self.n_actions
+ self.state_action_dim = self.state_dim + self.action_dim
+
+ self.num_kernel = args.num_kernel
+
+ self.key_extractors = nn.ModuleList()
+ self.agents_extractors = nn.ModuleList()
+ self.action_extractors = nn.ModuleList()
+
+ adv_hypernet_embed = self.args.adv_hypernet_embed
+ for i in range(self.num_kernel): # multi-head attention
+ if getattr(args, "adv_hypernet_layers", 1) == 1:
+ self.key_extractors.append(nn.Linear(self.state_dim, 1)) # key
+ self.agents_extractors.append(nn.Linear(self.state_dim, self.n_agents)) # agent
+ self.action_extractors.append(nn.Linear(self.state_action_dim, self.n_agents)) # action
+ elif getattr(args, "adv_hypernet_layers", 1) == 2:
+ self.key_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, 1))) # key
+ self.agents_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, self.n_agents))) # agent
+ self.action_extractors.append(nn.Sequential(nn.Linear(self.state_action_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, self.n_agents))) # action
+ elif getattr(args, "adv_hypernet_layers", 1) == 3:
+ self.key_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, 1))) # key
+ self.agents_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, self.n_agents))) # agent
+ self.action_extractors.append(nn.Sequential(nn.Linear(self.state_action_dim, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
+ nn.ReLU(),
+ nn.Linear(adv_hypernet_embed, self.n_agents))) # action
+ else:
+ raise Exception("Error setting number of adv hypernet layers.")
+
+ def forward(self, states, actions):
+ states = states.reshape(-1, self.state_dim)
+ actions = actions.reshape(-1, self.action_dim)
+ data = th.cat([states, actions], dim=1)
+
+ all_head_key = [k_ext(states) for k_ext in self.key_extractors]
+ all_head_agents = [k_ext(states) for k_ext in self.agents_extractors]
+ all_head_action = [sel_ext(data) for sel_ext in self.action_extractors]
+
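+ # Each head's per-agent weight is |key(s)| * sigmoid(agents(s)) * sigmoid(action(s, a));
+ # the heads are summed below to give the final importance weights.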
+ head_attend_weights = []
+ for curr_head_key, curr_head_agents, curr_head_action in zip(all_head_key, all_head_agents, all_head_action):
+ x_key = th.abs(curr_head_key).repeat(1, self.n_agents) + 1e-10
+ x_agents = th.sigmoid(curr_head_agents)
+ x_action = th.sigmoid(curr_head_action)
+ weights = x_key * x_agents * x_action
+ head_attend_weights.append(weights)
+
+ head_attend = th.stack(head_attend_weights, dim=1)
+ head_attend = head_attend.view(-1, self.num_kernel, self.n_agents)
+ head_attend = th.sum(head_attend, dim=1)
+
+ return head_attend
diff --git a/src/modules/mixers/nmix.py b/src/modules/mixers/nmix.py
new file mode 100644
index 0000000..927485f
--- /dev/null
+++ b/src/modules/mixers/nmix.py
@@ -0,0 +1,74 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from utils.th_utils import orthogonal_init_
+from torch.nn import LayerNorm
+
+class Mixer(nn.Module):
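+ """Monotonic mixing network (QMIX-style): state-conditioned hypernetworks generate the
+ weights of a two-layer mixer, and taking their absolute value keeps Q_tot monotonic in
+ each agent's Q-value."""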
+ def __init__(self, args, abs=True):
+ super(Mixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.embed_dim = args.mixing_embed_dim
+ self.input_dim = self.state_dim = int(np.prod(args.state_shape))
+
+ self.abs = abs # monotonicity constraint
+ self.qmix_pos_func = getattr(self.args, "qmix_pos_func", "abs")
+ assert self.qmix_pos_func == "abs" # only the abs constraint is supported here; see pos_func below
+
+ # hyper w1 b1
+ self.hyper_w1 = nn.Sequential(nn.Linear(self.input_dim, args.hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hypernet_embed, self.n_agents * self.embed_dim))
+ self.hyper_b1 = nn.Sequential(nn.Linear(self.input_dim, self.embed_dim))
+
+ # hyper w2 b2
+ self.hyper_w2 = nn.Sequential(nn.Linear(self.input_dim, args.hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(args.hypernet_embed, self.embed_dim))
+ self.hyper_b2 = nn.Sequential(nn.Linear(self.input_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+
+ if getattr(args, "use_orthogonal", False):
+ raise NotImplementedError
+ for m in self.modules():
+ orthogonal_init_(m)
+
+ def forward(self, qvals, states):
+ # reshape
+ b, t, _ = qvals.size()
+
+ qvals = qvals.reshape(b * t, 1, self.n_agents)
+ states = states.reshape(-1, self.state_dim)
+
+ # First layer
+ w1 = self.hyper_w1(states).view(-1, self.n_agents, self.embed_dim) # b * t, n_agents, emb
+ b1 = self.hyper_b1(states).view(-1, 1, self.embed_dim)
+
+ # Second layer
+ w2 = self.hyper_w2(states).view(-1, self.embed_dim, 1) # b * t, emb, 1
+ b2 = self.hyper_b2(states).view(-1, 1, 1)
+
+ if self.abs:
+ w1 = self.pos_func(w1)
+ w2 = self.pos_func(w2)
+ # print(w1.mean(), w1.var())
+ # print(w2.mean(), w2.var())
+
+ # Forward
+ hidden = F.elu(th.matmul(qvals, w1) + b1) # b * t, 1, emb
+ y = th.matmul(hidden, w2) + b2 # b * t, 1, 1
+
+ return y.view(b, t, -1)
+
+ def pos_func(self, x):
+ if self.qmix_pos_func == "softplus":
+ return th.nn.Softplus(beta=self.args.qmix_pos_func_beta)(x)
+ elif self.qmix_pos_func == "quadratic":
+ return 0.5 * x ** 2
+ else:
+ return th.abs(x)
+
diff --git a/src/modules/mixers/qatten.py b/src/modules/mixers/qatten.py
new file mode 100644
index 0000000..aab641a
--- /dev/null
+++ b/src/modules/mixers/qatten.py
@@ -0,0 +1,100 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+
+class QattenMixer(nn.Module):
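+ """Qatten mixer: multi-head attention in which the global state forms the queries and each
+ agent's own features form the keys; the resulting per-agent weights (lambda) combine the
+ agent Q-values, plus a state-dependent constant."""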
+ def __init__(self, args):
+ super(QattenMixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.state_dim = int(np.prod(args.state_shape))
+ self.u_dim = int(np.prod(args.agent_own_state_size))
+
+ self.n_query_embedding_layer1 = args.n_query_embedding_layer1
+ self.n_query_embedding_layer2 = args.n_query_embedding_layer2
+ self.n_key_embedding_layer1 = args.n_key_embedding_layer1
+ self.n_head_embedding_layer1 = args.n_head_embedding_layer1
+ self.n_head_embedding_layer2 = args.n_head_embedding_layer2
+ self.n_attention_head = args.n_attention_head
+ self.n_constrant_value = args.n_constrant_value
+
+ self.query_embedding_layers = nn.ModuleList()
+ for i in range(self.n_attention_head):
+ self.query_embedding_layers.append(nn.Sequential(nn.Linear(self.state_dim, self.n_query_embedding_layer1),
+ nn.ReLU(),
+ nn.Linear(self.n_query_embedding_layer1, self.n_query_embedding_layer2)))
+
+ self.key_embedding_layers = nn.ModuleList()
+ for i in range(self.n_attention_head):
+ self.key_embedding_layers.append(nn.Linear(self.u_dim, self.n_key_embedding_layer1))
+
+
+ self.scaled_product_value = np.sqrt(args.n_query_embedding_layer2)
+
+ self.head_embedding_layer = nn.Sequential(nn.Linear(self.state_dim, self.n_head_embedding_layer1),
+ nn.ReLU(),
+ nn.Linear(self.n_head_embedding_layer1, self.n_head_embedding_layer2))
+
+ self.constrant_value_layer = nn.Sequential(nn.Linear(self.state_dim, self.n_constrant_value),
+ nn.ReLU(),
+ nn.Linear(self.n_constrant_value, 1))
+
+
+ def forward(self, agent_qs, states):
+ bs = agent_qs.size(0)
+ states = states.reshape(-1, self.state_dim)
+ us = self._get_us(states)
+ agent_qs = agent_qs.view(-1, 1, self.n_agents)
+
+ q_lambda_list = []
+ for i in range(self.n_attention_head):
+ state_embedding = self.query_embedding_layers[i](states)
+ u_embedding = self.key_embedding_layers[i](us)
+
+ # shape: [-1, 1, n_query_embedding_layer2]
+ state_embedding = state_embedding.reshape(-1, 1, self.n_query_embedding_layer2)
+ # shape: [-1, n_key_embedding_layer1, n_agent]
+ u_embedding = u_embedding.reshape(-1, self.n_agents, self.n_key_embedding_layer1)
+ u_embedding = u_embedding.permute(0, 2, 1)
+
+ # shape: [-1, 1, n_agent]
+ raw_lambda = th.matmul(state_embedding, u_embedding) / self.scaled_product_value
+ q_lambda = F.softmax(raw_lambda, dim=-1)
+
+ q_lambda_list.append(q_lambda)
+
+ # shape: [-1, n_attention_head, n_agent]
+ q_lambda_list = th.stack(q_lambda_list, dim=1).squeeze(-2)
+
+ # shape: [-1, n_agent, n_attention_head]
+ q_lambda_list = q_lambda_list.permute(0, 2, 1)
+
+ # shape: [-1, 1, n_attention_head]
+ q_h = th.matmul(agent_qs, q_lambda_list)
+
+ if self.args.type == 'weighted':
+ # shape: [-1, n_attention_head, 1]
+ w_h = th.abs(self.head_embedding_layer(states))
+ w_h = w_h.reshape(-1, self.n_head_embedding_layer2, 1)
+
+ # shape: [-1, 1]
+ sum_q_h = th.matmul(q_h, w_h)
+ sum_q_h = sum_q_h.reshape(-1, 1)
+ else:
+ # shape: [-1, 1]
+ sum_q_h = q_h.sum(-1)
+ sum_q_h = sum_q_h.reshape(-1, 1)
+
+ c = self.constrant_value_layer(states)
+ q_tot = sum_q_h + c
+ q_tot = q_tot.view(bs, -1, 1)
+ return q_tot
+
+ def _get_us(self, states):
+ agent_own_state_size = self.args.agent_own_state_size
+ with th.no_grad():
+ us = states[:, :agent_own_state_size*self.n_agents].reshape(-1, agent_own_state_size)
+ return us
\ No newline at end of file
diff --git a/src/modules/mixers/qmix.py b/src/modules/mixers/qmix.py
new file mode 100644
index 0000000..f67516a
--- /dev/null
+++ b/src/modules/mixers/qmix.py
@@ -0,0 +1,84 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+
+class QMixer(nn.Module):
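+ """Standard QMIX mixer: hypernetworks conditioned on the state produce non-negative mixing
+ weights (via abs); k() and b() expose the implied per-agent weights and the state bias."""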
+ def __init__(self, args):
+ super(QMixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.state_dim = int(np.prod(args.state_shape))
+
+ self.embed_dim = args.mixing_embed_dim
+ self.abs = getattr(self.args, 'abs', True)
+
+ if getattr(args, "hypernet_layers", 1) == 1:
+ self.hyper_w_1 = nn.Linear(self.state_dim, self.embed_dim * self.n_agents)
+ self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)
+ elif getattr(args, "hypernet_layers", 1) == 2:
+ hypernet_embed = self.args.hypernet_embed
+ self.hyper_w_1 = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(hypernet_embed, self.embed_dim * self.n_agents))
+ self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
+ nn.ReLU(inplace=True),
+ nn.Linear(hypernet_embed, self.embed_dim))
+ elif getattr(args, "hypernet_layers", 1) > 2:
+ raise Exception("Sorry >2 hypernet layers is not implemented!")
+ else:
+ raise Exception("Error setting number of hypernet layers.")
+
+ # State dependent bias for hidden layer
+ self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)
+
+ # V(s) instead of a bias for the last layers
+ self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+
+
+ def forward(self, agent_qs, states):
+ bs = agent_qs.size(0)
+ states = states.reshape(-1, self.state_dim)
+ agent_qs = agent_qs.reshape(-1, 1, self.n_agents)
+ # First layer
+ w1 = self.hyper_w_1(states).abs() if self.abs else self.hyper_w_1(states)
+ b1 = self.hyper_b_1(states)
+ w1 = w1.view(-1, self.n_agents, self.embed_dim)
+ b1 = b1.view(-1, 1, self.embed_dim)
+ hidden = F.elu(th.bmm(agent_qs, w1) + b1)
+
+ # Second layer
+ w_final = self.hyper_w_final(states).abs() if self.abs else self.hyper_w_final(states)
+ w_final = w_final.view(-1, self.embed_dim, 1)
+ # State-dependent bias
+ v = self.V(states).view(-1, 1, 1)
+ # Compute final output
+ y = th.bmm(hidden, w_final) + v
+ # Reshape and return
+ q_tot = y.view(bs, -1, 1)
+
+ return q_tot
+
+ def k(self, states):
+ bs = states.size(0)
+ w1 = th.abs(self.hyper_w_1(states))
+ w_final = th.abs(self.hyper_w_final(states))
+ w1 = w1.view(-1, self.n_agents, self.embed_dim)
+ w_final = w_final.view(-1, self.embed_dim, 1)
+ k = th.bmm(w1, w_final).view(bs, -1, self.n_agents)
+ k = k / th.sum(k, dim=2, keepdim=True)
+ return k
+
+ def b(self, states):
+ bs = states.size(0)
+ w_final = th.abs(self.hyper_w_final(states))
+ w_final = w_final.view(-1, self.embed_dim, 1)
+ b1 = self.hyper_b_1(states)
+ b1 = b1.view(-1, 1, self.embed_dim)
+ v = self.V(states).view(-1, 1, 1)
+ b = th.bmm(b1, w_final) + v
+ return b
diff --git a/src/modules/mixers/qtran.py b/src/modules/mixers/qtran.py
new file mode 100644
index 0000000..96e8f82
--- /dev/null
+++ b/src/modules/mixers/qtran.py
@@ -0,0 +1,105 @@
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+
+class QTranBase(nn.Module):
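+ """QTRAN joint networks: Q(s, u) over the joint action (concatenated one-hot actions for
+ 'coma_critic', or summed per-agent hidden/action encodings for 'qtran_paper') and a
+ state-value network V(s)."""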
+ def __init__(self, args):
+ super(QTranBase, self).__init__()
+
+ self.args = args
+
+ self.n_agents = args.n_agents
+ self.n_actions = args.n_actions
+ self.state_dim = int(np.prod(args.state_shape))
+ self.arch = self.args.qtran_arch # QTran architecture
+
+ self.embed_dim = args.mixing_embed_dim
+
+ # Q(s,u)
+ if self.arch == "coma_critic":
+ # Q takes [state, u] as input
+ q_input_size = self.state_dim + (self.n_agents * self.n_actions)
+ elif self.arch == "qtran_paper":
+ # Q takes [state, agent_action_observation_encodings]
+ q_input_size = self.state_dim + self.args.rnn_hidden_dim + self.n_actions
+ else:
+ raise Exception("{} is not a valid QTran architecture".format(self.arch))
+
+ if self.args.network_size == "small":
+ self.Q = nn.Sequential(nn.Linear(q_input_size, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+
+ # V(s)
+ self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+ ae_input = self.args.rnn_hidden_dim + self.n_actions
+ self.action_encoding = nn.Sequential(nn.Linear(ae_input, ae_input),
+ nn.ReLU(inplace=True),
+ nn.Linear(ae_input, ae_input))
+ elif self.args.network_size == "big":
+ self.Q = nn.Sequential(nn.Linear(q_input_size, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+ # V(s)
+ self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, self.embed_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.embed_dim, 1))
+ ae_input = self.args.rnn_hidden_dim + self.n_actions
+ self.action_encoding = nn.Sequential(nn.Linear(ae_input, ae_input),
+ nn.ReLU(inplace=True),
+ nn.Linear(ae_input, ae_input))
+ else:
+ raise Exception("{} is not a valid network_size".format(self.args.network_size))
+
+ def forward(self, batch, hidden_states, actions=None):
+ bs = batch.batch_size
+ ts = batch.max_seq_length
+
+ states = batch["state"].reshape(bs * ts, self.state_dim)
+
+ if self.arch == "coma_critic":
+ if actions is None:
+ # Use the actions taken by the agents
+ actions = batch["actions_onehot"].reshape(bs * ts, self.n_agents * self.n_actions)
+ else:
+ # It arrives as (bs, ts, agents, actions); reshape it
+ actions = actions.reshape(bs * ts, self.n_agents * self.n_actions)
+ inputs = th.cat([states, actions], dim=1)
+ elif self.arch == "qtran_paper":
+ if actions is None:
+ # Use the actions taken by the agents
+ actions = batch["actions_onehot"].reshape(bs * ts, self.n_agents, self.n_actions)
+ else:
+ # It arrives as (bs, ts, agents, actions); reshape it
+ actions = actions.reshape(bs * ts, self.n_agents, self.n_actions)
+
+ hidden_states = hidden_states.reshape(bs * ts, self.n_agents, -1)
+ agent_state_action_input = th.cat([hidden_states, actions], dim=2)
+ agent_state_action_encoding = self.action_encoding(agent_state_action_input.reshape(bs * ts * self.n_agents, -1)).reshape(bs * ts, self.n_agents, -1)
+ agent_state_action_encoding = agent_state_action_encoding.sum(dim=1) # Sum across agents
+
+ inputs = th.cat([states, agent_state_action_encoding], dim=1)
+
+ q_outputs = self.Q(inputs)
+
+ states = batch["state"].reshape(bs * ts, self.state_dim)
+ v_outputs = self.V(states)
+
+ return q_outputs, v_outputs
+
diff --git a/src/modules/mixers/qtransformer.py b/src/modules/mixers/qtransformer.py
new file mode 100644
index 0000000..791d816
--- /dev/null
+++ b/src/modules/mixers/qtransformer.py
@@ -0,0 +1,158 @@
+import numpy as np
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.parameter import Parameter
+
+
+class HyperLinear(nn.Module):
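+ """Linear layer whose weight matrix is generated by a hypernetwork conditioned on the
+ input itself, plus an optional learnable bias."""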
+ def __init__(self, entity_num, input_dim, output_dim, hyper_hidden_size, bias=True):
+ super(HyperLinear, self).__init__()
+ self.normalize = False
+ self.entity_num = entity_num
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+
+ self.hypernet = nn.Sequential(
+ nn.Linear(input_dim, hyper_hidden_size),
+ nn.ReLU(inplace=True),
+ nn.Linear(hyper_hidden_size, input_dim * output_dim),
+ # nn.Tanh()
+ )
+ if bias:
+ self.bias = Parameter(th.Tensor(1, output_dim).fill_(0.))
+ else:
+ self.bias = 0
+
+ def forward(self, x):
+ bs, fea_dim = x.shape
+ hyper_out = self.hypernet(x)
+
+ if self.normalize:
+ # [batch_size, input_dim * output_dim] -> [b * t, entity_num, input_dim, output_dim]
+ hyper_out = F.softmax(hyper_out.view(-1, self.entity_num, self.input_dim, self.output_dim), dim=1)
+
+ # [batch_size, input_dim * output_dim] -> [batch_size, input_dim, output_dim]
+ weights = hyper_out.view(bs, self.input_dim, self.output_dim)
+
+ out = th.matmul(x.unsqueeze(1), weights).squeeze(1) + self.bias
+ return out # [batch_size output_dim]
+
+
+class APIEmbeddingLayer(nn.Module):
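+ """Permutation-invariant state embedding: ally and enemy features are embedded entity-wise
+ by HyperLinear layers and mean-pooled, with optional last-action and timestep embeddings
+ added on top."""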
+ def __init__(self, args, output_dim):
+ super(APIEmbeddingLayer, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+ self.output_dim = output_dim
+
+ self.embedding_enemy = HyperLinear(args.n_enemies, args.state_enemy_feats_size, output_dim, args.hpn_hyper_dim)
+ self.embedding_ally = HyperLinear(args.n_agents, args.state_ally_feats_size, output_dim, args.hpn_hyper_dim)
+
+ if self.args.env_args["state_last_action"]:
+ self.embedding_action = nn.Linear(args.n_actions, output_dim)
+
+ if self.args.env_args["state_timestep_number"]:
+ self.embedding_timestep = nn.Linear(1, output_dim)
+
+ def forward(self, state_components):
+ ally_features, enemy_features = state_components[:2]
+ ally_features = ally_features.reshape(-1, self.args.state_ally_feats_size)
+ enemy_features = enemy_features.reshape(-1, self.args.state_enemy_feats_size)
+
+ # [bs * t, output_dim]
+ embed_ally = self.embedding_ally(ally_features).view(-1, self.n_agents, self.output_dim).mean(dim=1)
+ embed_enemy = self.embedding_enemy(enemy_features).view(-1, self.n_enemies, self.output_dim).mean(dim=1)
+ output = embed_ally + embed_enemy
+
+ if self.args.env_args["state_last_action"]:
+ n_agent_actions = state_components[2].reshape(-1, self.n_agents, self.n_actions)
+ embed_last_action = self.embedding_action(n_agent_actions).mean(dim=1) # [bs * t,output_dim]
+ output = output + embed_last_action
+
+ if self.args.env_args["state_timestep_number"]:
+ timestep = state_components[-1]
+ embed_timestep = self.embedding_timestep(timestep) # [bs * t, output_dim]
+ output = output + embed_timestep
+
+ return output
+
+
+class APIMixer(nn.Module):
+ """
+ The Mixing Net should be permutation invariant.
+ """
+
+ def __init__(self, args):
+ super(APIMixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.embed_dim = args.mixing_embed_dim
+ self.input_dim = self.state_dim = int(np.prod(args.state_shape))
+
+ # hyper w1
+ self.hyper_w1 = nn.Sequential(
+ nn.Linear(args.rnn_hidden_dim, 1 * self.embed_dim)
+ )
+
+ # shared PI state embedding
+ self.shared_state_embedding = nn.Sequential(
+ APIEmbeddingLayer(args, args.hypernet_embed),
+ nn.ReLU(inplace=True),
+ )
+
+ # hyper b1
+ self.hyper_b1 = nn.Sequential(
+ nn.Linear(args.hypernet_embed, self.embed_dim)
+ )
+
+ # hyper w2 b2
+ self.hyper_w2 = nn.Sequential(
+ nn.Linear(args.hypernet_embed, self.embed_dim)
+ )
+ self.hyper_b2 = nn.Sequential(
+ nn.Linear(args.hypernet_embed, 1)
+ )
+
+ def forward(self, qvals, states, hidden_states):
+ """
+ :param qvals: individual Q
+ :param states: global state
+ :param hidden_states: GRU output of the agent network, [bs, traj_len, n_agents, hidden_dim]
+ :return:
+ """
+ # reshape
+ b, t, _ = qvals.size()
+
+ qvals = qvals.reshape(b * t, 1, self.n_agents)
+ states = states.reshape(-1, self.state_dim)
+ state_components = th.split(states, self.args.state_component, dim=-1)
+
+ # Shared state embedding
+ state_embedding = self.shared_state_embedding(state_components) # [bs * t, hypernet_embed]
+
+ # First layer
+ w1 = self.hyper_w1(hidden_states).view(-1, self.n_agents, self.embed_dim) # [b * t, n_agents, emb]
+ w1 = F.softmax(w1, dim=1) # softmax keeps the weights positive
+ # [b * t, 1, n_agents] * [b * t, n_agents, emb]
+
+ b1 = self.hyper_b1(state_embedding).view(-1, 1, self.embed_dim)
+
+ # Second layer
+ w2 = self.hyper_w2(state_embedding).view(-1, self.embed_dim, 1) # [b * t, emb, 1]
+ b2 = self.hyper_b2(state_embedding).view(-1, 1, 1) # [b * t, 1, 1]
+
+ # positive weight
+ # w1 = th.abs(w1)
+ w2 = th.abs(w2)
+ # print(w1.mean(), w1.var())
+ # print(w2.mean(), w2.var())
+
+ # Forward
+ hidden = F.elu(th.matmul(qvals, w1) + b1) # [b * t, 1, emb]
+ y = th.matmul(hidden, w2) + b2 # b * t, 1, 1
+
+ return y.view(b, t, -1)
diff --git a/src/modules/mixers/qtransformer_v0.py b/src/modules/mixers/qtransformer_v0.py
new file mode 100644
index 0000000..5e1495f
--- /dev/null
+++ b/src/modules/mixers/qtransformer_v0.py
@@ -0,0 +1,182 @@
+import numpy as np
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.parameter import Parameter
+
+
+class TokenLayer(nn.Module):
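+ """Turns the factored global state into a sequence of entity tokens (allies, enemies, and
+ optionally last actions and the timestep) for the self-attention layers below."""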
+ def __init__(self, args, token_dim):
+ super(TokenLayer, self).__init__()
+ self.args = args
+ self.n_agents = args.n_agents
+ self.n_enemies = args.n_enemies
+ self.n_actions = args.n_actions
+
+ self.embedding_ally = nn.Linear(args.state_ally_feats_size, token_dim)
+ self.embedding_enemy = nn.Linear(args.state_enemy_feats_size, token_dim)
+
+ if self.args.env_args["state_last_action"]:
+ self.embedding_action = nn.Linear(args.n_actions, token_dim)
+
+ if self.args.env_args["state_timestep_number"]:
+ self.embedding_timestep = nn.Linear(1, token_dim)
+
+ def forward(self, state_components):
+ ally_features, enemy_features = state_components[:2]
+ ally_features = ally_features.reshape(-1, self.n_agents, self.args.state_ally_feats_size)
+ enemy_features = enemy_features.reshape(-1, self.n_enemies, self.args.state_enemy_feats_size)
+
+ embed_ally = self.embedding_ally(ally_features) # [bs * t, n_agents, embed_dim]
+ embed_enemy = self.embedding_enemy(enemy_features) # [bs * t, n_enemies, embed_dim]
+ tokens = [embed_ally, embed_enemy]
+
+ if self.args.env_args["state_last_action"]:
+ n_agent_actions = state_components[2].reshape(-1, self.n_agents, self.n_actions)
+ embed_last_action = self.embedding_action(n_agent_actions) # [bs * t, n_agents, embed_dim]
+ tokens.append(embed_last_action)
+
+ if self.args.env_args["state_timestep_number"]:
+ timestep = state_components[-1]
+ embed_timestep = self.embedding_timestep(timestep).unsqueeze(dim=-2) # [bs * t, 1, embed_dim]
+ tokens.append(embed_timestep)
+
+ tokens = th.cat(tokens, dim=-2)
+ return tokens # [bs * t, entity_num, embed_dim]
+
+
+class SelfAttention(nn.Module):
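+ """Scaled dot-product self-attention over entity tokens; with shared_query=True a single
+ learned query pools the whole sequence into one output token."""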
+ def __init__(self, emb_dim, shared_query, end_index, heads=1):
+ super(SelfAttention, self).__init__()
+
+ self.emb_dim = emb_dim
+ self.heads = heads
+ self.shared_query = shared_query
+ self.end_index = end_index
+
+ if shared_query:
+ self.queries = Parameter(th.Tensor(1, emb_dim * heads))
+ nn.init.normal_(self.queries)
+ self.end_index = 1
+ else:
+ self.toqueries = nn.Linear(emb_dim, emb_dim * heads, bias=False)
+ self.tokeys = nn.Linear(emb_dim, emb_dim * heads, bias=False)
+ self.tovalues = nn.Linear(emb_dim, emb_dim * heads, bias=False)
+
+ if self.heads > 1:
+ self.unifyheads = nn.Linear(heads * emb_dim, emb_dim)
+
+ def forward(self, x):
+ b, t, e = x.size() # [bs, sequence_length, token_dim]
+ h = self.heads
+
+ if self.shared_query:
+ queries = self.queries.expand(b, -1).view(b, 1, h, e)
+ else:
+ queries = self.toqueries(x).view(b, t, h, e)
+ keys = self.tokeys(x).view(b, t, h, e)
+ values = self.tovalues(x).view(b, t, h, e)
+
+ # compute scaled dot-product self-attention
+
+ # - fold heads into the batch dimension
+ if self.shared_query:
+ queries = queries.transpose(1, 2).contiguous().view(b * h, 1, e)
+ else:
+ queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
+ keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
+ values = values.transpose(1, 2).contiguous().view(b * h, t, e)
+
+ # - Instead of dividing the dot products by sqrt(e), we scale the queries and keys
+ # by e^(1/4) each, which should be more memory efficient.
+ queries = queries[:, :self.end_index] / (e ** (1 / 4)) # [b * h, entity_num, e]
+ keys = keys / (e ** (1 / 4)) # [b * h, t, e]
+
+ # - get dot product of queries and keys, and scale
+ dot = th.bmm(queries, keys.transpose(1, 2)) # [b * h, entity_num, t]
+
+ assert dot.size() == (b * h, self.end_index, t)
+
+ # - dot now has row-wise self-attention probabilities
+ dot = F.softmax(dot, dim=2) # [b * h, entity_num, t]
+
+ # apply the self attention to the values, [b * h, entity_num, t] * [b * h, t, token_dim] = [b * h, entity_num, token_dim]
+ out = th.bmm(dot, values).view(b, h, self.end_index, e) # [b, h, entity_num, token_dim]
+
+ # swap h, t back, unify heads
+ out = out.transpose(1, 2).contiguous().view(b, self.end_index, h * e)
+
+ if self.heads > 1:
+ return self.unifyheads(out) # [b, entity_num, token_dim]
+ else:
+ return out # [b, entity_num, token_dim]
+
+
+class TransformerMixer(nn.Module):
+ """
+ The Mixing Net should be permutation invariant.
+ """
+
+ def __init__(self, args):
+ super(TransformerMixer, self).__init__()
+
+ self.args = args
+ self.n_agents = args.n_agents
+ self.embed_dim = args.mixing_embed_dim
+ self.input_dim = self.state_dim = int(np.prod(args.state_shape))
+
+ # hyper w1 b1
+ self.hyper_w1 = nn.Sequential(
+ TokenLayer(args, args.hypernet_embed),
+ SelfAttention(args.hypernet_embed, shared_query=False, end_index=self.n_agents, heads=1),
+ nn.Linear(args.hypernet_embed, self.embed_dim)
+ )
+ self.hyper_b1 = nn.Sequential(
+ TokenLayer(args, self.embed_dim),
+ SelfAttention(self.embed_dim, shared_query=True, end_index=1, heads=1)
+ )
+
+ # hyper w2 b2
+ self.hyper_w2 = nn.Sequential(
+ TokenLayer(args, args.hypernet_embed),
+ SelfAttention(args.hypernet_embed, shared_query=True, end_index=1, heads=1),
+ nn.Linear(args.hypernet_embed, self.embed_dim)
+ )
+ self.hyper_b2 = nn.Sequential(
+ TokenLayer(args, self.embed_dim),
+ SelfAttention(self.embed_dim, shared_query=True, end_index=1, heads=1),
+ nn.Linear(self.embed_dim, 1)
+ )
+
+ def forward(self, qvals, states):
+ """
+ :param qvals: individual Q
+ :param states: global state
+ :return:
+ """
+ # reshape
+ b, t, _ = qvals.size()
+
+ qvals = qvals.view(b * t, 1, self.n_agents)
+ states = states.reshape(-1, self.state_dim)
+ state_components = th.split(states, self.args.state_component, dim=-1)
+
+ # First layer
+ w1 = self.hyper_w1(state_components).view(-1, self.n_agents, self.embed_dim) # [b * t, n_agents, emb]
+ b1 = self.hyper_b1(state_components).view(-1, 1, self.embed_dim)
+
+ # Second layer
+ w2 = self.hyper_w2(state_components).view(-1, self.embed_dim, 1) # b * t, emb, 1
+ b2 = self.hyper_b2(state_components).view(-1, 1, 1)
+
+ # positive weight
+ w1 = th.abs(w1)
+ w2 = th.abs(w2)
+ # print(w1.mean(), w1.var())
+ # print(w2.mean(), w2.var())
+
+ # Forward
+ hidden = F.elu(th.matmul(qvals, w1) + b1) # [b * t, 1, emb]
+ y = th.matmul(hidden, w2) + b2 # b * t, 1, 1
+
+ return y.view(b, t, -1)
diff --git a/src/modules/mixers/vdn.py b/src/modules/mixers/vdn.py
new file mode 100644
index 0000000..fc05b63
--- /dev/null
+++ b/src/modules/mixers/vdn.py
@@ -0,0 +1,10 @@
+import torch as th
+import torch.nn as nn
+
+
+class VDNMixer(nn.Module):
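+ """VDN mixer: Q_tot is simply the sum of the individual agent Q-values."""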
+ def __init__(self):
+ super(VDNMixer, self).__init__()
+
+ def forward(self, agent_qs, batch):
+ return th.sum(agent_qs, dim=2, keepdim=True)
\ No newline at end of file
diff --git a/src/run/__init__.py b/src/run/__init__.py
new file mode 100644
index 0000000..5383579
--- /dev/null
+++ b/src/run/__init__.py
@@ -0,0 +1,4 @@
+from .run import run as default_run
+
+REGISTRY = {}
+REGISTRY["default"] = default_run
\ No newline at end of file
diff --git a/src/run/run.py b/src/run/run.py
new file mode 100644
index 0000000..8775d48
--- /dev/null
+++ b/src/run/run.py
@@ -0,0 +1,334 @@
+import datetime
+import os
+import pprint
+import time
+import threading
+import torch as th
+from types import SimpleNamespace as SN
+from utils.logging import Logger
+from utils.timehelper import time_left, time_str
+from os.path import dirname, abspath
+import sys  # used for sys.stdout.flush() at the end of run_sequential
+
+from learners import REGISTRY as le_REGISTRY
+from runners import REGISTRY as r_REGISTRY
+from controllers import REGISTRY as mac_REGISTRY
+from components.episode_buffer import ReplayBuffer
+from components.transforms import OneHot
+
+
+def run(_run, _config, _log):
+ # check args sanity
+ _config = args_sanity_check(_config, _log)
+
+ args = SN(**_config)
+
+ th.set_num_threads(args.thread_num)
+ # th.set_num_interop_threads(8)
+
+ args.device = "cuda" if args.use_cuda else "cpu"
+
+ # setup loggers
+ logger = Logger(_log)
+
+ _log.info("Experiment Parameters:")
+ experiment_params = pprint.pformat(_config,
+ indent=4,
+ width=1)
+ _log.info("\n\n" + experiment_params + "\n")
+
+ # configure tensorboard logger
+ unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
+ args.unique_token = unique_token
+
+ testing_algorithms = ["vdn", "qmix", "hpn_vdn", "hpn_qmix",
+ "deepset_vdn", "deepset_qmix", "deepset_hyper_vdn", "deepset_hyper_qmix",
+ "updet_vdn", "updet_qmix", "vdn_DA", "qmix_DA",
+ "gnn_vdn", "gnn_qmix", "qplex", "hpn_qplex", "asn"
+ ]
+ env_name = args.env
+ logdir = env_name
+ if env_name in ["sc2", "sc2_v2", ]:
+ logdir = os.path.join("{}_{}-obs_aid={}-obs_act={}".format(
+ logdir,
+ args.env_args["map_name"],
+ int(args.obs_agent_id),
+ int(args.obs_last_action),
+ ))
+ if env_name == "sc2_v2":
+ logdir = logdir + "-conic_fov={}".format(
+ "1-change_fov_by_move={}".format(
+ int(args.env_args["change_fov_with_move"])) if args.env_args["conic_fov"] else "0"
+ )
+ logdir = os.path.join(logdir,
+ "algo={}-agent={}".format(args.name, args.agent),
+ "env_n={}".format(
+ args.batch_size_run,
+ ))
+ if args.name in testing_algorithms:
+ if args.name in ["vdn_DA", "qmix_DA", ]:
+ logdir = os.path.join(logdir,
+ "{}-data_augment={}".format(
+ args.mixer, args.augment_times
+ ))
+ elif args.name in ["gnn_vdn", "gnn_qmix"]:
+ logdir = os.path.join(logdir,
+ "{}-layer_num={}".format(
+ args.mixer, args.gnn_layer_num
+ ))
+ elif args.name in ["vdn", "qmix", "deepset_vdn", "deepset_qmix", "qplex", "asn"]:
+ logdir = os.path.join(logdir,
+ "mixer={}".format(
+ args.mixer,
+ ))
+ elif args.name in ["updet_vdn", "updet_qmix"]:
+ logdir = os.path.join(logdir,
+ "mixer={}-att_dim={}-att_head={}-att_layer={}".format(
+ args.mixer,
+ args.transformer_embed_dim,
+ args.transformer_heads,
+ args.transformer_depth,
+ ))
+ elif args.name in ["deepset_hyper_vdn", "deepset_hyper_qmix"]:
+ logdir = os.path.join(logdir,
+ "mixer={}-hpn_hyperdim={}".format(
+ args.mixer,
+ args.hpn_hyper_dim,
+ ))
+ elif args.name in ["hpn_vdn", "hpn_qmix", "hpn_qplex"]:
+ logdir = os.path.join(logdir,
+ "head_n={}-mixer={}-hpn_hyperdim={}-acti={}".format(
+ args.hpn_head_num,
+ args.mixer,
+ args.hpn_hyper_dim,
+ args.hpn_hyper_activation,
+ ))
+
+ logdir = os.path.join(logdir,
+ "rnn_dim={}-2bs={}_{}-tdlambda={}-epdec_{}={}k".format(
+ args.rnn_hidden_dim,
+ args.buffer_size,
+ args.batch_size,
+ args.td_lambda,
+ args.epsilon_finish,
+ args.epsilon_anneal_time // 1000,
+ ))
+ args.log_model_dir = logdir
+ if args.use_tensorboard:
+ tb_logs_direc = os.path.join(dirname(dirname(dirname(abspath(__file__)))), args.local_results_path, "tb_logs")
+ tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
+ if args.name in testing_algorithms: # add parameter config to the logger path!
+ tb_exp_direc = os.path.join(tb_logs_direc, logdir, unique_token)
+ logger.setup_tb(tb_exp_direc)
+
+ # sacred is on by default
+ logger.setup_sacred(_run)
+
+ # Run and train
+ run_sequential(args=args, logger=logger)
+
+ # Clean up after finishing
+ print("Exiting Main")
+
+ print("Stopping all threads")
+ for t in threading.enumerate():
+ if t.name != "MainThread":
+ print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
+ t.join(timeout=1)
+ print("Thread joined")
+
+ print("Exiting script")
+
+ # Making sure framework really exits
+ os._exit(os.EX_OK)
+
+
+def evaluate_sequential(args, runner):
+ for _ in range(args.test_nepisode):
+ runner.run(test_mode=True)
+
+ if args.save_replay:
+ runner.save_replay()
+
+ runner.close_env()
+
+
+def run_sequential(args, logger):
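+ """Build the runner, replay buffer, controller (mac) and learner, optionally restore a
+ checkpoint, then alternate between collecting episodes and training until t_max."""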
+ # Init runner so we can get env info
+ runner = r_REGISTRY[args.runner](args=args, logger=logger)
+
+ # Set up schemes and groups here
+ env_info = runner.get_env_info()
+ args.n_agents = env_info["n_agents"]
+ args.n_actions = env_info["n_actions"]
+ args.state_shape = env_info["state_shape"]
+ args.obs_shape = env_info["obs_shape"]
+ args.accumulated_episodes = getattr(args, "accumulated_episodes", None)
+
+ if args.env in ["sc2", "sc2_v2", "gfootball"]:
+ if args.env in ["sc2", "sc2_v2"]:
+ args.output_normal_actions = env_info["n_normal_actions"]
+ args.n_enemies = env_info["n_enemies"]
+ args.n_allies = env_info["n_allies"]
+ # args.obs_ally_feats_size = env_info["obs_ally_feats_size"]
+ # args.obs_enemy_feats_size = env_info["obs_enemy_feats_size"]
+ args.state_ally_feats_size = env_info["state_ally_feats_size"]
+ args.state_enemy_feats_size = env_info["state_enemy_feats_size"]
+ args.obs_component = env_info["obs_component"]
+ args.state_component = env_info["state_component"]
+ args.map_type = env_info["map_type"]
+ args.agent_own_state_size = env_info["state_ally_feats_size"]
+
+ # Default/Base scheme
+ scheme = {
+ "state": {"vshape": env_info["state_shape"]},
+ "obs": {"vshape": env_info["obs_shape"], "group": "agents"},
+ "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
+ "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
+ "probs": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.float},
+ "reward": {"vshape": (1,)},
+ "terminated": {"vshape": (1,), "dtype": th.uint8},
+ }
+ groups = {
+ "agents": args.n_agents
+ }
+ preprocess = {
+ "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
+ }
+ # [batch, episode_length, n_agents, feature_dim]
+ buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
+ preprocess=preprocess,
+ device="cpu" if args.buffer_cpu_only else args.device)
+ # Setup multiagent controller here
+ mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
+
+ # Give runner the scheme
+ runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
+
+ # Learner
+ learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
+
+ if args.use_cuda:
+ learner.cuda()
+
+ if args.checkpoint_path != "":
+ timesteps = []
+ timestep_to_load = 0
+
+ if not os.path.isdir(args.checkpoint_path):
+ logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
+ return
+
+ # Go through all files in args.checkpoint_path
+ for name in os.listdir(args.checkpoint_path):
+ full_name = os.path.join(args.checkpoint_path, name)
+ # Check if they are dirs the names of which are numbers
+ if os.path.isdir(full_name) and name.isdigit():
+ timesteps.append(int(name))
+
+ if args.load_step == 0:
+ # choose the max timestep
+ timestep_to_load = max(timesteps)
+ else:
+ # choose the timestep closest to load_step
+ timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
+
+ model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
+
+ logger.console_logger.info("Loading model from {}".format(model_path))
+ learner.load_models(model_path)
+ runner.t_env = timestep_to_load
+
+ if args.evaluate or args.save_replay:
+ evaluate_sequential(args, runner)
+ return
+
+ # start training
+ episode = 0
+ last_test_T = -args.test_interval - 1
+ last_log_T = 0
+ model_save_time = 0
+
+ start_time = time.time()
+ last_time = start_time
+
+ logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
+
+ while runner.t_env <= args.t_max:
+ # Run for a whole episode at a time
+ with th.no_grad():
+ # t_start = time.time()
+ episode_batch = runner.run(test_mode=False)
+ if episode_batch.batch_size > 0: # After clearing the batch data, the batch may be empty.
+ buffer.insert_episode_batch(episode_batch)
+ # print("Sample new batch cost {} seconds.".format(time.time() - t_start))
+ episode += args.batch_size_run
+
+ if buffer.can_sample(args.batch_size):
+ if args.accumulated_episodes and episode % args.accumulated_episodes != 0:
+ continue
+
+ episode_sample = buffer.sample(args.batch_size)
+
+ # Truncate batch to only filled timesteps
+ max_ep_t = episode_sample.max_t_filled()
+ episode_sample = episode_sample[:, :max_ep_t]
+
+ if episode_sample.device != args.device:
+ episode_sample.to(args.device)
+
+ learner.train(episode_sample, runner.t_env, episode)
+ del episode_sample
+
+ # Execute test runs once in a while
+ n_test_runs = max(1, args.test_nepisode // runner.batch_size)
+ if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
+ logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
+ logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
+ time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
+ last_time = time.time()
+ last_test_T = runner.t_env
+ with th.no_grad():
+ for _ in range(n_test_runs):
+ runner.run(test_mode=True)
+
+ if args.save_model and (
+ runner.t_env - model_save_time >= args.save_model_interval or runner.t_env >= args.t_max):
+ model_save_time = runner.t_env
+ save_path = os.path.join(args.local_results_path, "models", args.log_model_dir, args.unique_token,
+ str(runner.t_env))
+ # "results/models/{}".format(unique_token)
+ os.makedirs(save_path, exist_ok=True)
+ logger.console_logger.info("Saving models to {}".format(save_path))
+
+ # learner should handle saving/loading -- delegate actor save/load to mac,
+ # use appropriate filenames to do critics, optimizer states
+ learner.save_models(save_path)
+
+ if (runner.t_env - last_log_T) >= args.log_interval:
+ logger.log_stat("episode", episode, runner.t_env)
+ logger.log_stat("episode_in_buffer", buffer.episodes_in_buffer, runner.t_env)
+ logger.print_recent_stats()
+ last_log_T = runner.t_env
+
+ runner.close_env()
+ logger.console_logger.info("Finished Training")
+
+ # flush
+ sys.stdout.flush()
+ time.sleep(10)
+
+
+def args_sanity_check(config, _log):
+ # set CUDA flags
+ # config["use_cuda"] = True # Use cuda whenever possible!
+ if config["use_cuda"] and not th.cuda.is_available():
+ config["use_cuda"] = False
+ _log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
+
+ if config["test_nepisode"] < config["batch_size_run"]:
+ config["test_nepisode"] = config["batch_size_run"]
+ else:
+ config["test_nepisode"] = (config["test_nepisode"] // config["batch_size_run"]) * config["batch_size_run"]
+
+ return config
diff --git a/src/runners/__init__.py b/src/runners/__init__.py
new file mode 100644
index 0000000..c5c1306
--- /dev/null
+++ b/src/runners/__init__.py
@@ -0,0 +1,7 @@
+REGISTRY = {}
+
+from .episode_runner import EpisodeRunner
+REGISTRY["episode"] = EpisodeRunner
+
+from .parallel_runner import ParallelRunner
+REGISTRY["parallel"] = ParallelRunner
diff --git a/src/runners/episode_runner.py b/src/runners/episode_runner.py
new file mode 100644
index 0000000..40a7c84
--- /dev/null
+++ b/src/runners/episode_runner.py
@@ -0,0 +1,146 @@
+from envs import REGISTRY as env_REGISTRY
+from functools import partial
+from components.episode_buffer import EpisodeBatch
+import numpy as np
+import time
+
+
+class EpisodeRunner:
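+ """Serial runner: steps a single environment one episode at a time and collects the
+ transitions into an EpisodeBatch."""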
+
+ def __init__(self, args, logger):
+ self.args = args
+ self.logger = logger
+ self.batch_size = self.args.batch_size_run
+ if self.batch_size > 1:
+ self.batch_size = 1
+ logger.console_logger.warning("EpisodeRunner only supports a single env; resetting `batch_size_run` to 1.")
+
+ self.env = env_REGISTRY[self.args.env](**self.args.env_args)
+ if self.args.evaluate:
+ print("Waiting the environment to start...")
+ time.sleep(5)
+ self.episode_limit = self.env.episode_limit
+ self.t = 0
+
+ self.t_env = 0
+
+ self.train_returns = []
+ self.test_returns = []
+ self.train_stats = {}
+ self.test_stats = {}
+
+ # Log the first run
+ self.log_train_stats_t = -1000000
+
+ def setup(self, scheme, groups, preprocess, mac):
+ if self.args.use_cuda and not self.args.cpu_inference:
+ self.batch_device = self.args.device
+ else:
+ self.batch_device = "cpu" if self.args.buffer_cpu_only else self.args.device
+ print(" &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& self.batch_device={}".format(
+ self.batch_device))
+ self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
+ preprocess=preprocess, device=self.batch_device)
+ self.mac = mac
+
+ def get_env_info(self):
+ return self.env.get_env_info()
+
+ def save_replay(self):
+ self.env.save_replay()
+
+ def close_env(self):
+ self.env.close()
+
+ def reset(self):
+ self.batch = self.new_batch()
+ if (self.args.use_cuda and self.args.cpu_inference) and str(self.mac.get_device()) != "cpu":
+ self.mac.cpu() # copy model to cpu
+
+ self.env.reset()
+ self.t = 0
+
+ def run(self, test_mode=False):
+ self.reset()
+
+ terminated = False
+ episode_return = 0
+ self.mac.init_hidden(batch_size=self.batch_size)
+
+ while not terminated:
+ pre_transition_data = {
+ "state": [self.env.get_state()],
+ "avail_actions": [self.env.get_avail_actions()],
+ "obs": [self.env.get_obs()]
+ }
+ self.batch.update(pre_transition_data, ts=self.t)
+
+ # Pass the entire batch of experiences up till now to the agents
+ # Receive the actions for each agent at this timestep in a batch of size 1
+ actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
+ # Fix memory leak
+ cpu_actions = actions.to("cpu").numpy()
+
+ reward, terminated, env_info = self.env.step(actions[0])
+ episode_return += reward
+
+ post_transition_data = {
+ "actions": cpu_actions,
+ "reward": [(reward,)],
+ "terminated": [(terminated != env_info.get("episode_limit", False),)],
+ }
+
+ self.batch.update(post_transition_data, ts=self.t)
+
+ if self.args.evaluate:
+ time.sleep(1)
+ print(self.t, post_transition_data["reward"])
+
+ self.t += 1
+
+ last_data = {
+ "state": [self.env.get_state()],
+ "avail_actions": [self.env.get_avail_actions()],
+ "obs": [self.env.get_obs()]
+ }
+ self.batch.update(last_data, ts=self.t)
+
+ # Select actions in the last stored state
+ actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
+ # Fix memory leak
+ cpu_actions = actions.to("cpu").numpy()
+ self.batch.update({"actions": cpu_actions}, ts=self.t)
+
+ cur_stats = self.test_stats if test_mode else self.train_stats
+ cur_returns = self.test_returns if test_mode else self.train_returns
+ log_prefix = "test_" if test_mode else ""
+ cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})
+ cur_stats["n_episodes"] = 1 + cur_stats.get("n_episodes", 0)
+ cur_stats["ep_length"] = self.t + cur_stats.get("ep_length", 0)
+
+ if not test_mode:
+ self.t_env += self.t
+
+ cur_returns.append(episode_return)
+
+ if test_mode and (len(self.test_returns) == self.args.test_nepisode):
+ self._log(cur_returns, cur_stats, log_prefix)
+ elif not test_mode and self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
+ self._log(cur_returns, cur_stats, log_prefix)
+ if hasattr(self.mac.action_selector, "epsilon"):
+ self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
+ self.log_train_stats_t = self.t_env
+
+ return self.batch
+
+ def _log(self, returns, stats, prefix):
+ self.logger.log_stat(prefix + "return_min", np.min(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_max", np.max(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
+ returns.clear()
+
+ for k, v in stats.items():
+ if k != "n_episodes":
+ self.logger.log_stat(prefix + k + "_mean", v / stats["n_episodes"], self.t_env)
+ stats.clear()
diff --git a/src/runners/parallel_runner.py b/src/runners/parallel_runner.py
new file mode 100644
index 0000000..ab6e04a
--- /dev/null
+++ b/src/runners/parallel_runner.py
@@ -0,0 +1,310 @@
+from envs import REGISTRY as env_REGISTRY
+from functools import partial
+from components.episode_buffer import EpisodeBatch
+from multiprocessing import Pipe, Process
+
+import numpy as np
+import time
+
+
+# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
+# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
+class ParallelRunner:
+
+ def __init__(self, args, logger):
+ self.args = args
+ self.logger = logger
+ self.batch_size = self.args.batch_size_run
+
+ # Make subprocesses for the envs
+ self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
+ env_fn = env_REGISTRY[self.args.env]
+ self.ps = []
+ for i, worker_conn in enumerate(self.worker_conns):
+ ps = Process(target=env_worker,
+ args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
+ self.ps.append(ps)
+ if self.args.evaluate:
+ print("Waiting the environment to start...")
+ time.sleep(5)
+
+ for p in self.ps:
+ p.daemon = True
+ p.start()
+
+ self.parent_conns[0].send(("get_env_info", None))
+ self.env_info = self.parent_conns[0].recv()
+ self.episode_limit = self.env_info["episode_limit"]
+
+ self.t = 0
+
+ self.t_env = 0
+
+ self.train_returns = []
+ self.test_returns = []
+ self.train_stats = {}
+ self.test_stats = {}
+
+ self.log_train_stats_t = -100000
+
+ def setup(self, scheme, groups, preprocess, mac):
+ if self.args.use_cuda and not self.args.cpu_inference:
+ self.batch_device = self.args.device
+ else:
+ self.batch_device = "cpu" if self.args.buffer_cpu_only else self.args.device
+ #print(" &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& self.batch_device={}".format(
+ # self.batch_device))
+ self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
+ preprocess=preprocess, device=self.batch_device)
+ self.mac = mac
+ self.scheme = scheme
+ self.groups = groups
+ self.preprocess = preprocess
+
+ def get_env_info(self):
+ return self.env_info
+
+ def save_replay(self):
+ pass
+
+ def close_env(self):
+ for parent_conn in self.parent_conns:
+ parent_conn.send(("close", None))
+
+ def reset(self):
+ self.batch = self.new_batch()
+
+ if (self.args.use_cuda and self.args.cpu_inference) and str(self.mac.get_device()) != "cpu":
+ self.mac.cpu() # copy model to cpu
+
+ # Reset the envs
+ for parent_conn in self.parent_conns:
+ parent_conn.send(("reset", None))
+
+ pre_transition_data = {
+ "state": [],
+ "avail_actions": [],
+ "obs": []
+ }
+ # Get the obs, state and avail_actions back
+ for parent_conn in self.parent_conns:
+ data = parent_conn.recv()
+ pre_transition_data["state"].append(data["state"])
+ pre_transition_data["avail_actions"].append(data["avail_actions"])
+ pre_transition_data["obs"].append(data["obs"])
+
+ self.batch.update(pre_transition_data, ts=0, mark_filled=True)
+
+ self.t = 0
+ self.env_steps_this_run = 0
+
+ def run(self, test_mode=False):
+ self.reset()
+
+ all_terminated = False
+ episode_returns = [0 for _ in range(self.batch_size)]
+ episode_lengths = [0 for _ in range(self.batch_size)]
+ self.mac.init_hidden(batch_size=self.batch_size)
+ terminated = [False for _ in range(self.batch_size)]
+ envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
+ final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
+
+ save_probs = getattr(self.args, "save_probs", False)
+ while True:
+ # Pass the entire batch of experiences up till now to the agents
+ # Receive the actions for each agent at this timestep in a batch for each un-terminated env
+ if save_probs:
+ actions, probs = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env,
+ bs=envs_not_terminated, test_mode=test_mode)
+ else:
+ actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated,
+ test_mode=test_mode)
+
+ cpu_actions = actions.to("cpu").numpy()
+
+ # Update the actions taken
+ actions_chosen = {
+ "actions": np.expand_dims(cpu_actions, axis=1),
+ }
+ if save_probs:
+ actions_chosen["probs"] = probs.unsqueeze(1).to("cpu")
+
+ self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
+
+ # Send actions to each env
+ action_idx = 0
+ for idx, parent_conn in enumerate(self.parent_conns):
+ if idx in envs_not_terminated: # We produced actions for this env
+ if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
+ parent_conn.send(("step", cpu_actions[action_idx]))
+ action_idx += 1 # actions is not a list over every env
+
+ # # Update envs_not_terminated
+ # envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
+ # all_terminated = all(terminated)
+ # if all_terminated:
+ # break
+
+ # Post step data we will insert for the current timestep
+ post_transition_data = {
+ "reward": [],
+ "terminated": []
+ }
+ # Data for the next step we will insert in order to select an action
+ pre_transition_data = {
+ "state": [],
+ "avail_actions": [],
+ "obs": []
+ }
+ # Receive data back for each unterminated env
+ for idx, parent_conn in enumerate(self.parent_conns):
+ if not terminated[idx]:
+ data = parent_conn.recv()
+ # Remaining data for this current timestep
+ post_transition_data["reward"].append((data["reward"],))
+
+ episode_returns[idx] += data["reward"]
+ episode_lengths[idx] += 1
+ if not test_mode:
+ self.env_steps_this_run += 1
+
+ env_terminated = False
+ if data["terminated"]:
+ final_env_infos.append(data["info"])
+ if data["terminated"] and not data["info"].get("episode_limit", False):
+ env_terminated = True
+ terminated[idx] = data["terminated"]
+ post_transition_data["terminated"].append((env_terminated,))
+
+ # Data for the next timestep needed to select an action
+ pre_transition_data["state"].append(data["state"])
+ pre_transition_data["avail_actions"].append(data["avail_actions"])
+ pre_transition_data["obs"].append(data["obs"])
+
+ # Add post_transiton data into the batch
+ self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
+
+ if self.args.evaluate:
+ assert self.batch_size == 1
+ move = [["北", "南", "东", "西"][action - 2] if action > 1 and action < 6 else "action-{}".format(action)
+ for action in cpu_actions[0]]
+ print(self.t, move, post_transition_data["reward"])
+ time.sleep(1)
+
+ # Move onto the next timestep
+ self.t += 1
+
+ # Add the pre-transition data
+ self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
+
+ # Update envs_not_terminated
+ envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
+ all_terminated = all(terminated)
+ if all_terminated:
+ break
+
+ if not test_mode:
+ self.t_env += self.env_steps_this_run
+
+ # Get stats back for each env
+ for parent_conn in self.parent_conns:
+ parent_conn.send(("get_stats", None))
+
+ env_stats = []
+ for parent_conn in self.parent_conns:
+ env_stat = parent_conn.recv()
+ env_stats.append(env_stat)
+
+ cur_stats = self.test_stats if test_mode else self.train_stats
+ cur_returns = self.test_returns if test_mode else self.train_returns
+ log_prefix = "test_" if test_mode else ""
+ infos = [cur_stats] + final_env_infos
+
+ cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
+ cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
+ cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
+
+ cur_returns.extend(episode_returns)
+
+ n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
+ if test_mode and (len(self.test_returns) == n_test_runs):
+ self._log(cur_returns, cur_stats, log_prefix)
+ elif not test_mode and self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
+ self._log(cur_returns, cur_stats, log_prefix)
+ if hasattr(self.mac.action_selector, "epsilon"):
+ self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
+ self.log_train_stats_t = self.t_env
+
+ return self.batch
+ # return clear_no_reward_sub_trajectory(self.batch)
+
+ def _log(self, returns, stats, prefix):
+ self.logger.log_stat(prefix + "return_min", np.min(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_max", np.max(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
+ self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
+ returns.clear()
+
+ for k, v in stats.items():
+ if k != "n_episodes":
+ self.logger.log_stat(prefix + k + "_mean", v / stats["n_episodes"], self.t_env)
+ stats.clear()
+
+
+def env_worker(remote, env_fn):
+ # Make environment
+ env = env_fn.x()
+ while True:
+ cmd, data = remote.recv()
+ if cmd == "step":
+ actions = data
+ # Take a step in the environment
+ reward, terminated, env_info = env.step(actions)
+ # Return the observations, avail_actions and state to make the next action
+ state = env.get_state()
+ avail_actions = env.get_avail_actions()
+ obs = env.get_obs()
+ remote.send({
+ # Data for the next timestep needed to pick an action
+ "state": state,
+ "avail_actions": avail_actions,
+ "obs": obs,
+ # Rest of the data for the current timestep
+ "reward": reward,
+ "terminated": terminated,
+ "info": env_info
+ })
+ elif cmd == "reset":
+ env.reset()
+ remote.send({
+ "state": env.get_state(),
+ "avail_actions": env.get_avail_actions(),
+ "obs": env.get_obs()
+ })
+ elif cmd == "close":
+ env.close()
+ remote.close()
+ break
+ elif cmd == "get_env_info":
+ remote.send(env.get_env_info())
+ elif cmd == "get_stats":
+ remote.send(env.get_stats())
+ else:
+ raise NotImplementedError
+
+
+class CloudpickleWrapper():
+ """
+ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
+ """
+
+ def __init__(self, x):
+ self.x = x
+
+ def __getstate__(self):
+ import cloudpickle
+ return cloudpickle.dumps(self.x)
+
+ def __setstate__(self, ob):
+ import pickle
+ self.x = pickle.loads(ob)
diff --git a/src/utils/data_processing.py b/src/utils/data_processing.py
new file mode 100644
index 0000000..317cb6d
--- /dev/null
+++ b/src/utils/data_processing.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from components.episode_buffer import EpisodeBatch
+import copy
+import numpy as np
+import torch as th
+
+
+def clear_no_reward_sub_trajectory(batch):
+ """
+ :param batch:
+ :return:
+ """
+ filled = batch.data.transition_data["filled"] # [bs, traj_length, 1]
+ rewards = batch.data.transition_data["reward"] # [bs, traj_length, 1]
+ bs, traj_length = filled.shape[0], filled.shape[1]
+ fixed_row = []
+ for t in range(traj_length - 1, 0, -1):
+ remained_rows = [i for i in range(0, bs) if i not in fixed_row]
+ for row_idx in remained_rows:
+ if rewards[row_idx, t - 1, 0] == 0: # no reward
+ filled[row_idx, t, 0] = 0
+ if t == 1:
+ filled[row_idx, t - 1, 0] = 0 # the trajectory's Return is 0.
+ else: # receive reward
+ fixed_row.append(row_idx)
+
+ return batch[fixed_row]
+
+
+def _get_obs_component_dim(args):
+ move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim = args.obs_component # [4, (6, 5), (4, 5), 1]
+ enemy_feats_dim = np.prod(enemy_feats_dim)
+ ally_feats_dim = np.prod(ally_feats_dim)
+ return move_feats_dim, enemy_feats_dim, ally_feats_dim, own_feats_dim
+
+
+def _generate_permutation_matrix(bs, seq_length, n_agents, N, device):
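+ # Build one random permutation of N entities as a one-hot (N x N) matrix; the same
+ # permutation is used for every batch element, timestep and agent.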
+ permutation_matrix = th.zeros(size=[bs, seq_length, n_agents, N, N], dtype=th.float32, device=device)
+ ordered_list = np.arange(N) # [0, 1, 2, 3, ...]
+ shuffled_list = ordered_list.copy()
+ np.random.shuffle(shuffled_list) # [3, 0, 2, 1, ...]
+ permutation_matrix[:, :, :, ordered_list, shuffled_list] = 1
+ return permutation_matrix
+
+
+def do_data_augmentation(args, batch: EpisodeBatch, augment_times=2):
+ """
+ 'obs', 'attack action' and 'available action' need to be transformed
+ :param args:
+ :param batch:
+ :param augment_times:
+ :return:
+ """
+ bs = batch.batch_size
+ seq_length = batch.max_seq_length
+ obs_component_dim = _get_obs_component_dim(args=args)
+ attack_action_start_idx = 6
+
+ augmented_data = []
+ for t in range(augment_times):
+ new_batch = copy.deepcopy(batch)
+ obs = new_batch.data.transition_data["obs"] # [bs, seq_length, n_agents, obs_dim]
+ # actions = new_batch.data.transition_data["actions"] # [bs, seq_length, n_agents, 1]
+ actions_onehot = new_batch.data.transition_data["actions_onehot"] # [bs, seq_length, n_agents, action_num]
+ avail_actions = new_batch.data.transition_data["avail_actions"] # [bs, seq_length, n_agents, action_num]
+
+ # (1) split observation according to the semantic meaning
+ move_feats, enemy_feats, ally_feats, own_feats = th.split(obs, obs_component_dim, dim=-1)
+ reshaped_enemy_feats = enemy_feats.contiguous().view(bs, seq_length, args.n_agents, args.n_enemies, -1)
+ reshaped_ally_feats = ally_feats.contiguous().view(bs, seq_length, args.n_agents, (args.n_agents - 1), -1)
+
+ # (2) split available action into 2 groups: 'move' and 'attack'.
+ avail_other_action = avail_actions[:, :, :, :attack_action_start_idx] # (no_op, stop, up, down, right, left)
+ avail_attack_action = avail_actions[:, :, :, attack_action_start_idx:] # [n_enemies]
+
+ # (3) split actions_onehot into 2 groups: 'move' and 'attack'.
+ other_action_onehot = actions_onehot[:, :, :, :attack_action_start_idx] # (no_op, stop, up, down, right, left)
+ attack_action_onehot = actions_onehot[:, :, :, attack_action_start_idx:] # [n_enemies]
+
+ # (4) generate permutation matrix for 'ally' and 'enemy'
+ ally_perm_matrix = _generate_permutation_matrix(bs, seq_length, args.n_agents, args.n_agents - 1,
+ device=obs.device)
+ enemy_perm_matrix = _generate_permutation_matrix(bs, seq_length, args.n_agents, args.n_enemies,
+ device=obs.device)
+
+ # (5) permute obs: including ally and enemy
+ # [bs, seq_length, n_agents, N, N] * [bs, seq_length, n_agents, N, feature_dim]
+ permuted_enemy_feat = th.matmul(enemy_perm_matrix, reshaped_enemy_feats).view(bs, seq_length, args.n_agents, -1)
+ permuted_ally_feat = th.matmul(ally_perm_matrix, reshaped_ally_feats).view(bs, seq_length, args.n_agents, -1)
+ permuted_obs = th.cat([move_feats, permuted_enemy_feat, permuted_ally_feat, own_feats], dim=-1)
+ # permuted_obs = th.cat([move_feats, permuted_enemy_feat, ally_feats, own_feats], dim=-1)
+
+ # (6) permute available action (use the same permutation matrix for enemy)
+ permuted_avail_attack_action = th.matmul(enemy_perm_matrix, avail_attack_action.unsqueeze(-1).float()).view(
+ bs, seq_length, args.n_agents, -1)
+ permuted_avail_actions = th.cat([avail_other_action, permuted_avail_attack_action.int()], dim=-1)
+
+ # (7) permute attack_action_onehot (use the same permutation matrix for enemy)
+ # used when obs_last_action is True
+ permuted_attack_action_onehot = th.matmul(enemy_perm_matrix, attack_action_onehot.unsqueeze(-1).float()).view(
+ bs, seq_length, args.n_agents, -1)
+ permuted_action_onehot = th.cat([other_action_onehot, permuted_attack_action_onehot], dim=-1)
+ permuted_action = permuted_action_onehot.max(dim=-1, keepdim=True)[1]
+
+ new_batch.data.transition_data["obs"] = permuted_obs
+ new_batch.data.transition_data["actions"] = permuted_action
+ new_batch.data.transition_data["actions_onehot"] = permuted_action_onehot
+ new_batch.data.transition_data["avail_actions"] = permuted_avail_actions
+
+        augmented_data.append(new_batch)
+
+    if augment_times > 1:
+        return augmented_data
+    return new_batch
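+
+
+# Usage sketch (illustrative): how a training loop might consume do_data_augmentation. The
+# names `buffer`, `learner`, `t_env` and `episode` are placeholders from the surrounding
+# run script, not objects defined in this file.
+#
+#     episode_sample = buffer.sample(args.batch_size)
+#     augmented = do_data_augmentation(args, episode_sample, augment_times=2)
+#     for aug_batch in augmented:  # a list when augment_times > 1
+#         learner.train(aug_batch, t_env, episode)
+#
+# Each copy uses its own random permutation, so the learner sees several consistent
+# relabelings of the same transitions.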
diff --git a/src/utils/dict2namedtuple.py b/src/utils/dict2namedtuple.py
new file mode 100644
index 0000000..bb474fd
--- /dev/null
+++ b/src/utils/dict2namedtuple.py
@@ -0,0 +1,5 @@
+from collections import namedtuple
+
+
+def convert(dictionary):
+ return namedtuple('GenericDict', dictionary.keys())(**dictionary)
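+
+
+# Example (illustrative):
+#
+#     >>> cfg = convert({"lr": 0.0005, "use_cuda": True})
+#     >>> cfg.lr
+#     0.0005
+#
+# Dictionary keys must be valid Python identifiers, otherwise namedtuple() raises a ValueError.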
diff --git a/src/utils/logging.py b/src/utils/logging.py
new file mode 100644
index 0000000..5393b7f
--- /dev/null
+++ b/src/utils/logging.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+import logging
+import numpy as np
+import torch as th
+
+class Logger:
+ def __init__(self, console_logger):
+ self.console_logger = console_logger
+
+ self.use_tb = False
+ self.use_sacred = False
+ self.use_hdf = False
+
+ self.stats = defaultdict(lambda: [])
+
+ def setup_tb(self, directory_name):
+ # Import here so it doesn't have to be installed if you don't use it
+ from tensorboard_logger import configure, log_value
+ configure(directory_name)
+ self.tb_logger = log_value
+ self.use_tb = True
+
+ def setup_sacred(self, sacred_run_dict):
+ self.sacred_info = sacred_run_dict.info
+ self.use_sacred = True
+
+ def log_stat(self, key, value, t, to_sacred=True):
+ self.stats[key].append((t, value))
+
+ if self.use_tb:
+ self.tb_logger(key, value, t)
+
+ if self.use_sacred and to_sacred:
+ if key in self.sacred_info:
+ self.sacred_info["{}_T".format(key)].append(t)
+ self.sacred_info[key].append(value)
+ else:
+ self.sacred_info["{}_T".format(key)] = [t]
+ self.sacred_info[key] = [value]
+
+ def print_recent_stats(self):
+ log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
+ i = 0
+ for (k, v) in sorted(self.stats.items()):
+ if k == "episode":
+ continue
+ i += 1
+ window = 5 if k != "epsilon" else 1
+ item = "{:.4f}".format(th.mean(th.tensor([float(x[1]) for x in self.stats[k][-window:]])))
+ log_str += "{:<25}{:>8}".format(k + ":", item)
+ log_str += "\n" if i % 4 == 0 else "\t"
+ self.console_logger.info(log_str)
+ # Reset stats to avoid accumulating logs in memory
+ self.stats = defaultdict(lambda: [])
+
+
+# set up a custom logger
+def get_logger():
+ logger = logging.getLogger()
+ logger.handlers = []
+ ch = logging.StreamHandler()
+ formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ logger.setLevel('DEBUG')
+
+ return logger
+
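+
+# Usage sketch (illustrative; the actual wiring lives in the run scripts):
+#
+#     console_logger = get_logger()
+#     logger = Logger(console_logger)
+#     # logger.setup_tb("results/tb_logs/my_run")  # optional, requires `tensorboard_logger`
+#     logger.log_stat("episode", 1, t=0)
+#     logger.log_stat("return_mean", 12.5, t=0)
+#     logger.print_recent_stats()
+#
+# print_recent_stats assumes "episode" has been logged at least once, since the header is
+# formatted from self.stats["episode"][-1].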
diff --git a/src/utils/noisy_liner.py b/src/utils/noisy_liner.py
new file mode 100644
index 0000000..36b191e
--- /dev/null
+++ b/src/utils/noisy_liner.py
@@ -0,0 +1,78 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class NoisyLinear(nn.Module):
+    r"""Applies a noisy linear transformation to the incoming data:
+    :math:`y = x(\mu_w + \sigma_w \odot \epsilon_w)^T + (\mu_b + \sigma_b \odot \epsilon_b)`
+
+    NoisyNet-style layer with independent Gaussian noise: a fresh noise sample is drawn on
+    every forward pass during training, while evaluation uses only the learned means, so the
+    layer then behaves like a standard ``nn.Linear``.
+
+    Args:
+        in_features: size of each input sample
+        out_features: size of each output sample
+        bias: If set to ``False``, the layer will not learn an additive (noisy) bias.
+            Default: ``True``
+
+    Shape:
+        - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
+          additional dimensions and :math:`H_{in} = \text{in\_features}`
+        - Output: :math:`(N, *, H_{out})` where all but the last dimension
+          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
+
+    Attributes:
+        u_w, s_w: learnable weight means and noise scales, each of shape
+            :math:`(\text{out\_features}, \text{in\_features})`
+        u_b, s_b: learnable bias means and noise scales, each of shape
+            :math:`(\text{out\_features})`, present only when :attr:`bias` is ``True``
+
+    Examples::
+
+        >>> m = NoisyLinear(20, 30)
+        >>> input = torch.randn(128, 20)
+        >>> output = m(input)
+        >>> print(output.size())
+        torch.Size([128, 30])
+    """
+ __constants__ = ['in_features', 'out_features']
+ # in_features: int
+ # out_features: int
+ # weight: torch.Tensor
+
+ def __init__(self, in_features: int, out_features: int, bias: bool = True, device: str = 'cpu') -> None:
+ super(NoisyLinear, self).__init__()
+ self.in_features = in_features
+ self.out_features = out_features
+ self.bias = bias
+ self.device = device
+ self.u_w = nn.Parameter(torch.Tensor(out_features, in_features))
+ self.s_w = nn.Parameter(torch.Tensor(out_features, in_features))
+ if bias:
+ self.u_b = nn.Parameter(torch.Tensor(out_features))
+ self.s_b = nn.Parameter(torch.Tensor(out_features))
+        else:
+            # Register the absent noisy-bias parameters as None so attribute access and
+            # state_dict handling stay consistent when bias=False.
+            self.register_parameter('u_b', None)
+            self.register_parameter('s_b', None)
+ self.reset_parameters()
+
+    def reset_parameters(self) -> None:
+        nn.init.kaiming_uniform_(self.u_w, a=math.sqrt(3 / self.in_features))
+        nn.init.constant_(self.s_w, 0.017)
+        if self.bias:
+            # Bias mean initialised uniformly in [-sqrt(3/in_features), +sqrt(3/in_features)].
+            bound = math.sqrt(3 / self.in_features)
+            nn.init.uniform_(self.u_b, a=-bound, b=bound)
+            nn.init.constant_(self.s_b, 0.017)
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        if self.training:
+            # Fresh independent Gaussian noise is drawn on every forward pass during training.
+            e_w = torch.randn(self.s_w.shape, device=self.device)
+            weight = self.u_w + (self.s_w * e_w)
+            if self.bias:
+                e_b = torch.randn(self.s_b.shape, device=self.device)
+                bias = self.u_b + (self.s_b * e_b)
+            else:
+                bias = None
+        else:
+            # Evaluation uses the learned means only (deterministic behaviour).
+            weight = self.u_w
+            bias = self.u_b if self.bias else None
+        return F.linear(input, weight, bias)
+
+    def extra_repr(self) -> str:
+        return 'in_features={}, out_features={}, bias={}'.format(
+            self.in_features, self.out_features, self.bias
+        )
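+
+
+# Usage sketch (illustrative): NoisyLinear is a drop-in replacement for nn.Linear. In training
+# mode two forward passes differ because fresh Gaussian noise is drawn each time; in eval mode
+# the deterministic means are used.
+#
+#     layer = NoisyLinear(20, 30)
+#     x = torch.randn(8, 20)
+#     layer.train(); y1, y2 = layer(x), layer(x)  # y1 != y2 (noisy weights)
+#     layer.eval();  y3, y4 = layer(x), layer(x)  # y3 == y4 (mean weights only)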
\ No newline at end of file
diff --git a/src/utils/rl_utils.py b/src/utils/rl_utils.py
new file mode 100644
index 0000000..397cb32
--- /dev/null
+++ b/src/utils/rl_utils.py
@@ -0,0 +1,89 @@
+import torch as th
+import torch.nn as nn
+import numpy as np
+
+
+def build_td_lambda_targets(rewards, terminated, mask, target_qs, gamma, td_lambda):
+    # Assumes target_qs in B*T*A and rewards, terminated, mask in (at least) B*(T-1)*1
+    # Initialise the last lambda-return for episodes that have not terminated
+ ret = target_qs.new_zeros(*target_qs.shape)
+ ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
+ # Backwards recursive update of the "forward view"
+ for t in range(ret.shape[1] - 2, -1, -1):
+ ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
+ * (rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
+ # Returns lambda-return from t=0 to t=T-1, i.e. in B*T-1*A
+ return ret[:, 0:-1]
+
+
+def build_gae_targets(rewards, masks, values, gamma, lambd):
+ B, T, A, _ = values.size()
+    T -= 1  # values carries one extra bootstrap step along the time dimension
+ advantages = th.zeros(B, T, A, 1).to(device=values.device)
+ advantage_t = th.zeros(B, A, 1).to(device=values.device)
+
+ for t in reversed(range(T)):
+ delta = rewards[:, t] + values[:, t+1] * gamma * masks[:, t] - values[:, t]
+ advantage_t = delta + advantage_t * gamma * lambd * masks[:, t]
+ advantages[:, t] = advantage_t
+
+ returns = values[:, :T] + advantages
+ return advantages, returns
+
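+# Added note on build_gae_targets above (commentary, not original code): it is the standard
+# GAE(lambda) recursion computed backwards in time,
+#     delta_t = r_t + gamma * m_t * V_{t+1} - V_t
+#     A_t     = delta_t + gamma * lambda * m_t * A_{t+1},
+# where m_t is the mask, so credit stops propagating across padded or terminated steps.
+# values is expected as [B, T+1, A, 1]; rewards and masks must index/broadcast against it per
+# step, and the returned advantages and returns are both [B, T, A, 1].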
+
+def build_q_lambda_targets(rewards, terminated, mask, exp_qvals, qvals, gamma, td_lambda):
+    # Assumes exp_qvals (and qvals) in B*T*A and rewards, terminated, mask in (at least) B*(T-1)*1
+    # Initialise the last lambda-return for episodes that have not terminated
+ ret = exp_qvals.new_zeros(*exp_qvals.shape)
+ ret[:, -1] = exp_qvals[:, -1] * (1 - th.sum(terminated, dim=1))
+ # Backwards recursive update of the "forward view"
+ for t in range(ret.shape[1] - 2, -1, -1):
+        reward = rewards[:, t] + exp_qvals[:, t] - qvals[:, t]  # off-policy correction
+ ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
+ * (reward + (1 - td_lambda) * gamma * exp_qvals[:, t + 1] * (1 - terminated[:, t]))
+ # Returns lambda-return from t=0 to t=T-1, i.e. in B*T-1*A
+ return ret[:, 0:-1]
+
+
+def build_target_q(td_q, target_q, mac, mask, gamma, td_lambda, n):
+ aug = th.zeros_like(td_q[:, :1])
+
+    # Tree diagram
+ mac = mac[:, :-1]
+ tree_q_vals = th.zeros_like(td_q)
+ coeff = 1.0
+ t1 = td_q[:]
+ for _ in range(n):
+ tree_q_vals += t1 * coeff
+ t1 = th.cat(((t1 * mac)[:, 1:], aug), dim=1)
+ coeff *= gamma * td_lambda
+ return target_q + tree_q_vals
+
+class RunningMeanStd(object):
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
+ def __init__(self, epsilon=1e-4, shape=()):
+ self.mean = np.zeros(shape, 'float64')
+ self.var = np.ones(shape, 'float64')
+ self.count = epsilon
+
+ def update(self, x):
+ batch_mean = np.mean(x, axis=0)
+ batch_var = np.var(x, axis=0)
+ batch_count = x.shape[0]
+ self.update_from_moments(batch_mean, batch_var, batch_count)
+
+ def update_from_moments(self, batch_mean, batch_var, batch_count):
+ delta = batch_mean - self.mean
+ tot_count = self.count + batch_count
+
+ new_mean = self.mean + delta * batch_count / tot_count
+ m_a = self.var * (self.count)
+ m_b = batch_var * (batch_count)
+ M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
+ new_var = M2 / (self.count + batch_count)
+
+ new_count = batch_count + self.count
+
+ self.mean = new_mean
+ self.var = new_var
+ self.count = new_count
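+
+
+# Usage sketch (illustrative): RunningMeanStd accumulates statistics across batches with the
+# parallel-variance update linked above; a typical use is reward/return normalisation.
+#
+#     rms = RunningMeanStd(shape=(1,))
+#     rms.update(np.array([[1.0], [2.0], [3.0]]))
+#     normalised = (np.array([[2.0]]) - rms.mean) / np.sqrt(rms.var + 1e-8)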
\ No newline at end of file
diff --git a/src/utils/th_utils.py b/src/utils/th_utils.py
new file mode 100644
index 0000000..db2356a
--- /dev/null
+++ b/src/utils/th_utils.py
@@ -0,0 +1,33 @@
+import torch
+from torch import nn
+
+def clip_by_tensor(t, t_min, t_max):
+    """
+    Element-wise clip of tensor t into [t_min, t_max].
+    :param t: tensor to clip
+    :param t_min: lower bound (tensor)
+    :param t_max: upper bound (tensor)
+    :return: clipped tensor
+    """
+    t = t.float()
+    t_min = t_min.float()
+    t_max = t_max.float()
+
+    result = (t >= t_min).float() * t + (t < t_min).float() * t_min
+    result = (result <= t_max).float() * result + (result > t_max).float() * t_max
+    return result
+
+def get_parameters_num(param_list):
+ return str(sum(p.numel() for p in param_list) / 1000) + 'K'
+
+
+def init(module, weight_init, bias_init, gain=1):
+ weight_init(module.weight.data, gain=gain)
+ bias_init(module.bias.data)
+ return module
+
+
+def orthogonal_init_(m, gain=1):
+ if isinstance(m, nn.Linear):
+ init(m, nn.init.orthogonal_,
+ lambda x: nn.init.constant_(x, 0), gain=gain)
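+
+
+# Usage sketch (illustrative): orthogonal_init_ is meant to be applied with Module.apply, e.g.
+#
+#     net = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 4))
+#     net.apply(orthogonal_init_)  # only nn.Linear sub-modules are re-initialised
+#     print(get_parameters_num(net.parameters()))
+#
+# clip_by_tensor expects t_min / t_max to already be tensors (both are cast with .float()),
+# so scalar bounds should be wrapped with torch.tensor(...) first.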
\ No newline at end of file
diff --git a/src/utils/timehelper.py b/src/utils/timehelper.py
new file mode 100644
index 0000000..a168c79
--- /dev/null
+++ b/src/utils/timehelper.py
@@ -0,0 +1,43 @@
+import time
+import numpy as np
+
+
+def print_time(start_time, T, t_max, episode, episode_rewards):
+ time_elapsed = time.time() - start_time
+ T = max(1, T)
+ time_left = time_elapsed * (t_max - T) / T
+    # Just in case it's over 100 days
+    time_left = min(time_left, 60 * 60 * 24 * 100)
+    last_reward = "N/A"
+ if len(episode_rewards) > 5:
+ last_reward = "{:.2f}".format(np.mean(episode_rewards[-50:]))
+ print("\033[F\033[F\x1b[KEp: {:,}, T: {:,}/{:,}, Reward: {}, \n\x1b[KElapsed: {}, Left: {}\n".format(episode, T, t_max, last_reward, time_str(time_elapsed), time_str(time_left)), " " * 10, end="\r")
+
+
+def time_left(start_time, t_start, t_current, t_max):
+ if t_current >= t_max:
+ return "-"
+ time_elapsed = time.time() - start_time
+ t_current = max(1, t_current)
+ time_left = time_elapsed * (t_max - t_current) / (t_current - t_start)
+    # Just in case it's over 100 days
+ time_left = min(time_left, 60 * 60 * 24 * 100)
+ return time_str(time_left)
+
+
+def time_str(s):
+ """
+ Convert seconds to a nicer string showing days, hours, minutes and seconds
+ """
+ days, remainder = divmod(s, 60 * 60 * 24)
+ hours, remainder = divmod(remainder, 60 * 60)
+ minutes, seconds = divmod(remainder, 60)
+ string = ""
+ if days > 0:
+ string += "{:d} days, ".format(int(days))
+ if hours > 0:
+ string += "{:d} hours, ".format(int(hours))
+ if minutes > 0:
+ string += "{:d} minutes, ".format(int(minutes))
+ string += "{:d} seconds".format(int(seconds))
+ return string
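+
+
+# Example (illustrative):
+#
+#     >>> time_str(93784)
+#     '1 days, 2 hours, 3 minutes, 4 seconds'
+#
+# time_left() extrapolates the remaining wall-clock time linearly from the timesteps completed
+# since t_start and caps the estimate at 100 days.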
diff --git a/src/utils/value_norm.py b/src/utils/value_norm.py
new file mode 100644
index 0000000..f1170ba
--- /dev/null
+++ b/src/utils/value_norm.py
@@ -0,0 +1,76 @@
+import numpy as np
+
+import torch
+import torch.nn as nn
+
+
+class ValueNorm(nn.Module):
+    """Normalize a vector of values across the first norm_axes dimensions, using running (debiased) statistics."""
+
+ def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
+ super(ValueNorm, self).__init__()
+
+ self.input_shape = input_shape
+ self.norm_axes = norm_axes
+ self.epsilon = epsilon
+ self.beta = beta
+ self.per_element_update = per_element_update
+ self.tpdv = dict(dtype=torch.float32, device=device)
+
+ self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
+ self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
+ self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
+
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ self.running_mean.zero_()
+ self.running_mean_sq.zero_()
+ self.debiasing_term.zero_()
+
+ def running_mean_var(self):
+ debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
+ debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
+ debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
+ return debiased_mean, debiased_var
+
+ @torch.no_grad()
+ def update(self, input_vector):
+ if type(input_vector) == np.ndarray:
+ input_vector = torch.from_numpy(input_vector)
+ input_vector = input_vector.to(**self.tpdv)
+
+ batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
+ batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
+
+ if self.per_element_update:
+ batch_size = np.prod(input_vector.size()[:self.norm_axes])
+ weight = self.beta ** batch_size
+ else:
+ weight = self.beta
+
+ self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
+ self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
+ self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))
+
+ def normalize(self, input_vector):
+ # Make sure input is float32
+ if type(input_vector) == np.ndarray:
+ input_vector = torch.from_numpy(input_vector)
+ input_vector = input_vector.to(**self.tpdv)
+
+ mean, var = self.running_mean_var()
+ out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
+
+ return out
+
+ def denormalize(self, input_vector):
+ """ Transform normalized data back into original distribution """
+ if type(input_vector) == np.ndarray:
+ input_vector = torch.from_numpy(input_vector)
+ input_vector = input_vector.to(**self.tpdv)
+
+ mean, var = self.running_mean_var()
+ out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
+
+ return out
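+
+
+# Usage sketch (illustrative): ValueNorm is typically wrapped around value-function targets,
+# e.g. in a PPO-style critic update. `returns` and `values` below are placeholder tensors of
+# shape [batch, 1].
+#
+#     value_norm = ValueNorm(1)
+#     value_norm.update(returns)                     # refresh the running statistics
+#     critic_target = value_norm.normalize(returns)  # regress the critic in normalised space
+#     denorm_values = value_norm.denormalize(values) # back to the original scale for bootstrapping/logging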
\ No newline at end of file