diff --git a/DevProject/Packages/packages-lock.json b/DevProject/Packages/packages-lock.json index 682353c648..5cadcd6fb7 100644 --- a/DevProject/Packages/packages-lock.json +++ b/DevProject/Packages/packages-lock.json @@ -90,7 +90,7 @@ "depth": 0, "source": "local", "dependencies": { - "com.unity.ml-agents": "3.0.0", + "com.unity.ml-agents": "4.0.0", "com.unity.modules.physics": "1.0.0" } }, diff --git a/Project/Packages/packages-lock.json b/Project/Packages/packages-lock.json index 05c7777a63..81d16f38e4 100644 --- a/Project/Packages/packages-lock.json +++ b/Project/Packages/packages-lock.json @@ -116,7 +116,7 @@ "depth": 0, "source": "local", "dependencies": { - "com.unity.ml-agents": "3.0.0", + "com.unity.ml-agents": "4.0.0", "com.unity.modules.physics": "1.0.0" } }, diff --git a/colab/Colab_UnityEnvironment_1_Run.ipynb b/colab/Colab_UnityEnvironment_1_Run.ipynb index b221ac8421..1ef34a4272 100644 --- a/colab/Colab_UnityEnvironment_1_Run.ipynb +++ b/colab/Colab_UnityEnvironment_1_Run.ipynb @@ -32,7 +32,7 @@ }, "source": [ "# ML-Agents Open a UnityEnvironment\n", - "" + "" ] }, { diff --git a/colab/Colab_UnityEnvironment_2_Train.ipynb b/colab/Colab_UnityEnvironment_2_Train.ipynb index ca55f8a58e..f61dc1e767 100644 --- a/colab/Colab_UnityEnvironment_2_Train.ipynb +++ b/colab/Colab_UnityEnvironment_2_Train.ipynb @@ -22,7 +22,7 @@ }, "source": [ "# ML-Agents Q-Learning with GridWorld\n", - "" + "" ] }, { @@ -190,7 +190,7 @@ "id": "pZhVRfdoyPmv" }, "source": [ - "The [GridWorld](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Examples.md#gridworld) Environment is a simple Unity visual environment. The Agent is a blue square in a 3x3 grid that is trying to reach a green __`+`__ while avoiding a red __`x`__.\n", + "The [GridWorld](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Examples.md#gridworld) Environment is a simple Unity visual environment. 
The Agent is a blue square in a 3x3 grid that is trying to reach a green __`+`__ while avoiding a red __`x`__.\n", "\n", "The observation is an image obtained by a camera on top of the grid.\n", "\n", diff --git a/colab/Colab_UnityEnvironment_3_SideChannel.ipynb b/colab/Colab_UnityEnvironment_3_SideChannel.ipynb index 646a01defe..344169aec9 100644 --- a/colab/Colab_UnityEnvironment_3_SideChannel.ipynb +++ b/colab/Colab_UnityEnvironment_3_SideChannel.ipynb @@ -23,7 +23,7 @@ }, "source": [ "# ML-Agents Use SideChannels\n", - "" + "" ] }, { @@ -176,7 +176,7 @@ "## Side Channel\n", "\n", "SideChannels are objects that can be passed to the constructor of a UnityEnvironment or the `make()` method of a registry entry to send non Reinforcement Learning related data.\n", - "More information available [here](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Python-API.md#communicating-additional-information-with-the-environment)\n", + "More information available [here](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Python-API.md#communicating-additional-information-with-the-environment)\n", "\n", "\n", "\n" @@ -189,7 +189,7 @@ }, "source": [ "### Engine Configuration SideChannel\n", - "The [Engine Configuration Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Python-API.md#engineconfigurationchannel) is used to configure how the Unity Engine should run.\n", + "The [Engine Configuration Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Python-API.md#engineconfigurationchannel) is used to configure how the Unity Engine should run.\n", "We will use the GridWorld environment to demonstrate how to use the EngineConfigurationChannel." ] }, @@ -282,7 +282,7 @@ }, "source": [ "### Environment Parameters Channel\n", - "The [Environment Parameters Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Python-API.md#environmentparameters) is used to modify environment parameters during the simulation.\n", + "The [Environment Parameters Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Python-API.md#environmentparameters) is used to modify environment parameters during the simulation.\n", "We will use the GridWorld environment to demonstrate how to use the EngineConfigurationChannel." 
] }, @@ -419,7 +419,7 @@ }, "source": [ "### Creating your own Side Channels\n", - "You can send various kinds of data between a Unity Environment and Python but you will need to [create your own implementation of a Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Custom-SideChannels.md#custom-side-channels) for advanced use cases.\n" + "You can send various kinds of data between a Unity Environment and Python but you will need to [create your own implementation of a Side Channel](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Custom-SideChannels.md#custom-side-channels) for advanced use cases.\n" ] }, { diff --git a/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb b/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb index e5d3d45c8b..caf2c6cb3d 100644 --- a/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb +++ b/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb @@ -7,7 +7,7 @@ }, "source": [ "# ML-Agents run with Stable Baselines 3\n", - "" + "" ] }, { diff --git a/com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md b/com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md index 9421ae1484..96c248af35 100644 --- a/com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md +++ b/com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md @@ -28,24 +28,24 @@ The ML-Agents Extensions package is not currently available in the Package Manag recommended ways to install the package: ### Local Installation -[Clone the repository](https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/docs/Installation.md#clone-the-ml-agents-toolkit-repository-optional) and follow the -[Local Installation for Development](https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/docs/Installation.md#advanced-local-installation-for-development-1) +[Clone the repository](https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/docs/Installation.md#clone-the-ml-agents-toolkit-repository-optional) and follow the +[Local Installation for Development](https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/docs/Installation.md#advanced-local-installation-for-development-1) directions (substituting `com.unity.ml-agents.extensions` for the package name). ### Github via Package Manager In Unity 2019.4 or later, open the Package Manager, hit the "+" button, and select "Add package from git URL". 
-![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/images/unity_package_manager_git_url.png) +![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/images/unity_package_manager_git_url.png) In the dialog that appears, enter ``` -git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_22 +git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_23 ``` You can also edit your project's `manifest.json` directly and add the following line to the `dependencies` section: ``` -"com.unity.ml-agents.extensions": "git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_22", +"com.unity.ml-agents.extensions": "git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_23", ``` See [Git dependencies](https://docs.unity3d.com/Manual/upm-git.html#subfolder) for more information. Note that this may take several minutes to resolve the packages the first time that you add it. @@ -67,4 +67,4 @@ If using the `InputActuatorComponent` - No way to customize the action space of the `InputActuatorComponent` ## Need Help? -The main [README](https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/README.md) contains links for contacting the team or getting support. +The main [README](https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/README.md) contains links for contacting the team or getting support. diff --git a/com.unity.ml-agents.extensions/package.json b/com.unity.ml-agents.extensions/package.json index c315091c26..9b1bac450b 100644 --- a/com.unity.ml-agents.extensions/package.json +++ b/com.unity.ml-agents.extensions/package.json @@ -5,7 +5,7 @@ "unity": "6000.0", "description": "A source-only package for new features based on ML-Agents", "dependencies": { - "com.unity.ml-agents": "3.0.0", + "com.unity.ml-agents": "4.0.0", "com.unity.modules.physics": "1.0.0" } } diff --git a/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md b/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md index 60268878b5..f149105587 100644 --- a/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md +++ b/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md @@ -73,10 +73,10 @@ You can control the frequency of Academy stepping by calling `Academy.Instance.D * [Website] [github docs]: https://unity-technologies.github.io/ml-agents/ -[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Installation.md +[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Installation.md [Unity Inference Engine]: https://docs.unity3d.com/Packages/com.unity.ai.inference@2.2/manual/index.html [python package]: https://github.com/Unity-Technologies/ml-agents -[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/com.unity.ml-agents.extensions +[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/com.unity.ml-agents.extensions [GitHub repository]: https://github.com/Unity-Technologies/ml-agents [Execution Order of Event Functions]: https://docs.unity3d.com/Manual/ExecutionOrder.html [Unity Discussions]: https://discussions.unity.com/tag/ml-agents diff --git a/com.unity.ml-agents/Runtime/Academy.cs b/com.unity.ml-agents/Runtime/Academy.cs index bebb7174e4..ccd95b505c 
100644 --- a/com.unity.ml-agents/Runtime/Academy.cs +++ b/com.unity.ml-agents/Runtime/Academy.cs @@ -20,7 +20,7 @@ * API. For more information on each of these entities, in addition to how to * set-up a learning environment and train the behavior of characters in a * Unity scene, please browse our documentation pages on GitHub: - * https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/docs/ + * https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/docs/ */ namespace Unity.MLAgents @@ -61,7 +61,7 @@ void FixedUpdate() /// fall back to inference or heuristic decisions. (You can also set agents to always use /// inference or heuristics.) /// - [HelpURL("https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/" + + [HelpURL("https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/" + "docs/Learning-Environment-Design.md")] public class Academy : IDisposable { diff --git a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs index 3dbb89d816..bf2451c4f1 100644 --- a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs +++ b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs @@ -184,7 +184,7 @@ public interface IActionReceiver /// /// See [Agents - Actions] for more information on masking actions. /// - /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#actions + /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#actions /// /// void WriteDiscreteActionMask(IDiscreteActionMask actionMask); diff --git a/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs index dfb33901ff..5fdfab2b95 100644 --- a/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs +++ b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs @@ -16,7 +16,7 @@ public interface IDiscreteActionMask /// /// See [Agents - Actions] for more information on masking actions. /// - /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#masking-discrete-actions + /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#masking-discrete-actions /// /// The branch for which the actions will be masked. /// Index of the action. 
diff --git a/com.unity.ml-agents/Runtime/Agent.cs b/com.unity.ml-agents/Runtime/Agent.cs index ea13bcfa6a..42c9d52765 100644 --- a/com.unity.ml-agents/Runtime/Agent.cs +++ b/com.unity.ml-agents/Runtime/Agent.cs @@ -192,13 +192,13 @@ public override BuiltInActuatorType GetBuiltInActuatorType() /// [OnDisable()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnDisable.html] /// [OnBeforeSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnBeforeSerialize.html /// [OnAfterSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnAfterSerialize.html - /// [Agents]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md - /// [Reinforcement Learning in Unity]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design.md + /// [Agents]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md + /// [Reinforcement Learning in Unity]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design.md /// [Unity ML-Agents Toolkit]: https://github.com/Unity-Technologies/ml-agents - /// [Unity ML-Agents Toolkit manual]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Readme.md + /// [Unity ML-Agents Toolkit manual]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Readme.md /// /// - [HelpURL("https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/" + + [HelpURL("https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/" + "docs/Learning-Environment-Design-Agents.md")] [Serializable] [RequireComponent(typeof(BehaviorParameters))] @@ -728,8 +728,8 @@ public int CompletedEpisodes /// for information about mixing reward signals from curiosity and Generative Adversarial /// Imitation Learning (GAIL) with rewards supplied through this method. /// - /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#rewards - /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals + /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#rewards + /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals /// /// The new value of the reward. public void SetReward(float reward) @@ -756,8 +756,8 @@ public void SetReward(float reward) /// for information about mixing reward signals from curiosity and Generative Adversarial /// Imitation Learning (GAIL) with rewards supplied through this method. /// - /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#rewards - /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals + /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#rewards + /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals /// /// Incremental reward value. 
public void AddReward(float increment) @@ -945,8 +945,8 @@ public virtual void Initialize() { } /// implementing a simple heuristic function can aid in debugging agent actions and interactions /// with its environment. /// - /// [Demonstration Recorder]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#recording-demonstrations - /// [Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#actions + /// [Demonstration Recorder]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#recording-demonstrations + /// [Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#actions /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html /// /// @@ -1203,7 +1203,7 @@ void ResetSensors() /// For more information about observations, see [Observations and Sensors]. /// /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html - /// [Observations and Sensors]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#observations-and-sensors + /// [Observations and Sensors]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#observations-and-sensors /// public virtual void CollectObservations(VectorSensor sensor) { @@ -1245,7 +1245,7 @@ public ReadOnlyCollection GetStackedObservations() /// /// See [Agents - Actions] for more information on masking actions. /// - /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#actions + /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#actions /// /// public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask) { } @@ -1312,7 +1312,7 @@ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask) { } /// /// For more information about implementing agent actions see [Agents - Actions]. /// - /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Learning-Environment-Design-Agents.md#actions + /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Learning-Environment-Design-Agents.md#actions /// /// /// diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs index a4101f8976..af0d0d6c61 100644 --- a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs +++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs @@ -19,7 +19,7 @@ namespace Unity.MLAgents.Demonstrations /// See [Imitation Learning - Recording Demonstrations] for more information. 
/// /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html - /// [Imitation Learning - Recording Demonstrations]: https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs//Learning-Environment-Design-Agents.md#recording-demonstrations + /// [Imitation Learning - Recording Demonstrations]: https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs//Learning-Environment-Design-Agents.md#recording-demonstrations /// [RequireComponent(typeof(Agent))] [AddComponentMenu("ML Agents/Demonstration Recorder", (int)MenuGroup.Default)] diff --git a/docs/Inference-Engine.md b/docs/Inference-Engine.md index 0f7c3e3402..0fbcbe9fc3 100644 --- a/docs/Inference-Engine.md +++ b/docs/Inference-Engine.md @@ -38,9 +38,9 @@ The ML-Agents Toolkit only supports the models created with our trainers. Model loading expects certain conventions for constants and tensor names. While it is possible to construct a model that follows these conventions, we don't provide any additional help for this. More details can be found in -[TensorNames.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/com.unity.ml-agents/Runtime/Inference/TensorNames.cs) +[TensorNames.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/com.unity.ml-agents/Runtime/Inference/TensorNames.cs) and -[SentisModelParamLoader.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/com.unity.ml-agents/Runtime/Inference/SentisModelParamLoader.cs). +[SentisModelParamLoader.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/com.unity.ml-agents/Runtime/Inference/SentisModelParamLoader.cs). If you wish to run inference on an externally trained model, you should use Inference Engine directly, instead of trying to run it through ML-Agents. diff --git a/docs/Installation-Anaconda-Windows.md b/docs/Installation-Anaconda-Windows.md index 3b80adbdf0..c582422fda 100644 --- a/docs/Installation-Anaconda-Windows.md +++ b/docs/Installation-Anaconda-Windows.md @@ -123,10 +123,10 @@ commands in an Anaconda Prompt _(if you open a new prompt, be sure to activate the ml-agents Conda environment by typing `activate ml-agents`)_: ```sh -git clone --branch release_22 https://github.com/Unity-Technologies/ml-agents.git +git clone --branch release_23 https://github.com/Unity-Technologies/ml-agents.git ``` -The `--branch release_22` option will switch to the tag of the latest stable +The `--branch release_23` option will switch to the tag of the latest stable release. Omitting that will get the `main` branch which is potentially unstable. diff --git a/docs/Installation.md b/docs/Installation.md index 43416dea65..86befc183b 100644 --- a/docs/Installation.md +++ b/docs/Installation.md @@ -72,10 +72,10 @@ of our tutorials / guides assume you have access to our example environments). the repository if you would like to explore more examples. ```sh -git clone --branch release_22 https://github.com/Unity-Technologies/ml-agents.git +git clone --branch release_23 https://github.com/Unity-Technologies/ml-agents.git ``` -The `--branch release_22` option will switch to the tag of the latest stable +The `--branch release_23` option will switch to the tag of the latest stable release. Omitting that will get the `develop` branch which is potentially unstable. However, if you find that a release branch does not work, the recommendation is to use the `develop` branch as it may have potential fixes for bugs and dependency issues. 
@@ -90,7 +90,7 @@ git clone https://github.com/Unity-Technologies/ml-agents.git You will need to clone the repository if you plan to modify or extend the ML-Agents Toolkit for your purposes. If you plan to contribute those changes -back, make sure to clone the `develop` branch (by omitting `--branch release_22` +back, make sure to clone the `develop` branch (by omitting `--branch release_23` from the command above). See our [Contributions Guidelines](../com.unity.ml-agents/CONTRIBUTING.md) for more information on contributing to the ML-Agents Toolkit. diff --git a/docs/Learning-Environment-Design-Agents.md b/docs/Learning-Environment-Design-Agents.md index 8000a88465..fb904678c0 100644 --- a/docs/Learning-Environment-Design-Agents.md +++ b/docs/Learning-Environment-Design-Agents.md @@ -584,7 +584,7 @@ To allow more variety of observations that grid sensor can capture, the `GridSensorComponent` and the underlying `GridSensorBase` also provides interfaces that can be overridden to collect customized observation from detected objects. See the doc on -[extending grid Sensors](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/com.unity.ml-agents.extensions/Documentation~/CustomGridSensors.md) +[extending grid Sensors](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/com.unity.ml-agents.extensions/Documentation~/CustomGridSensors.md) for more details on custom grid sensors. __Note__: The `GridSensor` only works in 3D environments and will not behave diff --git a/docs/ML-Agents-Toolkit-Documentation.md b/docs/ML-Agents-Toolkit-Documentation.md index 0a1226ca91..576895c067 100644 --- a/docs/ML-Agents-Toolkit-Documentation.md +++ b/docs/ML-Agents-Toolkit-Documentation.md @@ -41,9 +41,9 @@ ## Python Tutorial with Google Colab -- [Using a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_22_docs/colab/Colab_UnityEnvironment_1_Run.ipynb) -- [Q-Learning with a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_22_docs/colab/Colab_UnityEnvironment_2_Train.ipynb) -- [Using Side Channels on a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_22_docs/colab/Colab_UnityEnvironment_3_SideChannel.ipynb) +- [Using a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_23_docs/colab/Colab_UnityEnvironment_1_Run.ipynb) +- [Q-Learning with a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_23_docs/colab/Colab_UnityEnvironment_2_Train.ipynb) +- [Using Side Channels on a UnityEnvironment](https://colab.research.google.com/github/Unity-Technologies/ml-agents/blob/release_23_docs/colab/Colab_UnityEnvironment_3_SideChannel.ipynb) ## Help diff --git a/docs/Migrating.md b/docs/Migrating.md index aba3d8565f..9e6796dfcd 100644 --- a/docs/Migrating.md +++ b/docs/Migrating.md @@ -223,7 +223,7 @@ folder - The Parameter Randomization feature has been merged with the Curriculum feature. It is now possible to specify a sampler in the lesson of a Curriculum. Curriculum has been refactored and is now specified at the level of the parameter, not the behavior. 
More information -[here](https://github.com/Unity-Technologies/ml-agents/blob/release_22_docs/docs/Training-ML-Agents.md).(#4160) +[here](https://github.com/Unity-Technologies/ml-agents/blob/release_23_docs/docs/Training-ML-Agents.md).(#4160) ### Steps to Migrate - The configuration format for curriculum and parameter randomization has changed. To upgrade your configuration files, diff --git a/docs/Readme.md b/docs/Readme.md index aa6b4d8ade..3d7ee2105d 100644 --- a/docs/Readme.md +++ b/docs/Readme.md @@ -1,6 +1,6 @@ # Unity ML-Agents Toolkit -[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_22_docs/docs/) +[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_23_docs/docs/) [![license badge](https://img.shields.io/badge/license-Apache--2.0-green.svg)](../LICENSE.md) @@ -44,7 +44,7 @@ See our [ML-Agents Overview](ML-Agents-Overview.md) page for detailed descriptions of all these features. Or go straight to our [web docs](https://unity-technologies.github.io/ml-agents/). ## Releases & Documentation -**Our latest, stable release is `Release 22`. Click +**Our latest, stable release is `Release 23`. Click [here](Getting-Started.md) to get started with the latest release of ML-Agents.** @@ -65,10 +65,10 @@ under active development and may be unstable. A few helpful guidelines: - The `com.unity.ml-agents` package is [verified](https://docs.unity3d.com/2020.1/Documentation/Manual/pack-safe.html) for Unity 2020.1 and later. Verified packages releases are numbered 1.0.x. -| **Version** | **Release Date** | **Source** | **Documentation** | **Download** | **Python Package** | **Unity Package** | -|:--------------------------:|:------:|:-------------:|:-------:|:------------:|:------------:|:------------:| -| **Release 22** | **October 5, 2024** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_22)** | **[docs](https://unity-technologies.github.io/ml-agents/)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_22.zip)** | **[1.1.0](https://pypi.org/project/mlagents/1.1.0/)** | **[3.0.0](https://docs.unity3d.com/Packages/com.unity.ml-agents@3.0/manual/index.html)** | -| **develop (unstable)** | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/develop) | [docs](https://unity-technologies.github.io/ml-agents/) | [download](https://github.com/Unity-Technologies/ml-agents/archive/develop.zip) | -- | -- | +| **Version** | **Release Date** | **Source** | **Documentation** | **Download** | **Python Package** | **Unity Package** | +|:----------------------:|:-----------------:|:-----------------------------------------------------------------------------:|:-------:|:--------------------------------------------------------------------------------------:|:------------:|:----------------------------------------------------------------------------------------:| +| **Release 23** | **July 11, 2025** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_23)** | **[docs](https://unity-technologies.github.io/ml-agents/)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_23.zip)** | **[1.1.0](https://pypi.org/project/mlagents/1.1.0/)** | **[4.0.0](https://docs.unity3d.com/Packages/com.unity.ml-agents@4.0/manual/index.html)** | +| **develop (unstable)** | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/develop) | 
[docs](https://unity-technologies.github.io/ml-agents/) | [download](https://github.com/Unity-Technologies/ml-agents/archive/develop.zip) | -- | -- | diff --git a/docs/Training-on-Amazon-Web-Service.md b/docs/Training-on-Amazon-Web-Service.md index d6549044d2..84109eff43 100644 --- a/docs/Training-on-Amazon-Web-Service.md +++ b/docs/Training-on-Amazon-Web-Service.md @@ -69,7 +69,7 @@ After launching your EC2 instance using the ami and ssh into it: 2. Clone the ML-Agents repo and install the required Python packages ```sh - git clone --branch release_22 https://github.com/Unity-Technologies/ml-agents.git + git clone --branch release_23 https://github.com/Unity-Technologies/ml-agents.git cd ml-agents/ml-agents/ pip3 install -e . ``` diff --git a/utils/make_readme_table.py b/utils/make_readme_table.py index bf467fd731..cdccf7e828 100644 --- a/utils/make_readme_table.py +++ b/utils/make_readme_table.py @@ -142,6 +142,7 @@ def pypi_link(self): ReleaseInfo("release_20", "2.3.0", "0.30.0", "November 21, 2022"), ReleaseInfo("release_21", "3.0.0-exp.1", "1.0.0", "October 9, 2023"), ReleaseInfo("release_22", "3.0.0", "1.1.0", "October 5, 2024"), + ReleaseInfo("release_23", "4.0.0", "1.1.0", "July 11, 2025"), # Verified releases # ReleaseInfo("", "1.0.8", "0.16.1", "May 26, 2021", is_verified=True), # ReleaseInfo("", "1.0.7", "0.16.1", "March 8, 2021", is_verified=True),
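Note on the final hunk: the new `ReleaseInfo("release_23", "4.0.0", "1.1.0", "July 11, 2025")` entry in `utils/make_readme_table.py` is the source data behind the regenerated release table in `docs/Readme.md` earlier in this diff. As a rough illustration of that mapping only — the field names, the `to_table_row` helper, and the URL templates below are assumptions for this sketch, not the toolkit's actual implementation (the real script also defines a `pypi_link` helper, visible in the hunk header) — a minimal standalone version might look like:

```python
# Minimal sketch, for illustration only -- NOT the real utils/make_readme_table.py.
# The argument order (tag, Unity package version, Python package version, date) is
# inferred from the ReleaseInfo(...) calls in the hunk above; everything else here
# (field names, to_table_row, URL templates) is a hypothetical stand-in.
from dataclasses import dataclass


@dataclass
class ReleaseInfo:
    tag: str              # git tag, e.g. "release_23"
    csharp_version: str   # com.unity.ml-agents package version, e.g. "4.0.0"
    python_version: str   # mlagents PyPI package version, e.g. "1.1.0"
    release_date: str     # e.g. "July 11, 2025"

    def to_table_row(self) -> str:
        """Render one bolded row of the README release table."""
        repo = "https://github.com/Unity-Technologies/ml-agents"
        docs = "https://unity-technologies.github.io/ml-agents/"
        pypi = f"https://pypi.org/project/mlagents/{self.python_version}/"
        # "4.0.0" -> "4.0" for the package-docs URL, matching the row in docs/Readme.md
        pkg_docs = (
            "https://docs.unity3d.com/Packages/com.unity.ml-agents@"
            + ".".join(self.csharp_version.split(".")[:2])
            + "/manual/index.html"
        )
        name = self.tag.replace("release_", "Release ")
        return (
            f"| **{name}** | **{self.release_date}** "
            f"| **[source]({repo}/tree/{self.tag})** | **[docs]({docs})** "
            f"| **[download]({repo}/archive/{self.tag}.zip)** "
            f"| **[{self.python_version}]({pypi})** "
            f"| **[{self.csharp_version}]({pkg_docs})** |"
        )


if __name__ == "__main__":
    latest = ReleaseInfo("release_23", "4.0.0", "1.1.0", "July 11, 2025")
    print(latest.to_table_row())
```

Running the sketch prints a row in the same shape as the **Release 23** line added to the table in `docs/Readme.md` above.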