diff --git a/com.unity.ml-agents/Runtime/Academy.cs b/com.unity.ml-agents/Runtime/Academy.cs
index 3249a52221..ec2c34c4d3 100644
--- a/com.unity.ml-agents/Runtime/Academy.cs
+++ b/com.unity.ml-agents/Runtime/Academy.cs
@@ -89,7 +89,7 @@ public class Academy : IDisposable
///
/// -
/// 1.3.0
- /// Support action spaces with both continuous and discrete actions.
+ /// Support both continuous and discrete actions.
///
///
///
@@ -590,7 +590,7 @@ void EnvironmentReset()
/// NNModel and the InferenceDevice as provided.
///
/// The NNModel the ModelRunner must use.
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
///
/// The inference device (CPU or GPU) the ModelRunner will use.
///
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
index c33dea2d22..c266d8cc9d 100644
--- a/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
+++ b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
@@ -6,7 +6,7 @@
namespace Unity.MLAgents.Actuators
{
///
- /// Defines the structure of an Action Space to be used by the Actuator system.
+ /// Defines the structure of the actions to be used by the Actuator system.
///
[Serializable]
public struct ActionSpec
@@ -15,9 +15,9 @@ public struct ActionSpec
int m_NumContinuousActions;
///
- /// An array of branch sizes for our action space.
+ /// An array of branch sizes for discrete actions.
///
- /// For an IActuator that uses a Discrete , the number of
+ /// For an IActuator that uses discrete actions, the number of
/// branches is the Length of the Array and each index contains the branch size.
/// The cumulative sum of the total number of discrete actions can be retrieved
/// by the property.
@@ -27,12 +27,12 @@ public struct ActionSpec
public int[] BranchSizes;
///
- /// The number of actions for a Continuous .
+ /// The number of continuous actions that an Agent can take.
///
public int NumContinuousActions { get { return m_NumContinuousActions; } set { m_NumContinuousActions = value; } }
///
- /// The number of branches for a Discrete .
+ /// The number of branches for discrete actions that an Agent can take.
///
public int NumDiscreteActions { get { return BranchSizes == null ? 0 : BranchSizes.Length; } }
@@ -57,12 +57,11 @@ public static ActionSpec MakeContinuous(int numActions)
/// Creates a Discrete with the array of branch sizes that
/// represents the action space.
///
- /// The array of branch sizes for the discrete action space. Each index
+ /// The array of branch sizes for the discrete actions. Each index
/// contains the number of actions available for that branch.
/// An Discrete ActionSpec initialized with the array of branch sizes.
public static ActionSpec MakeDiscrete(params int[] branchSizes)
{
- var numActions = branchSizes.Length;
var actuatorSpace = new ActionSpec(0, branchSizes);
return actuatorSpace;
}
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
index 6823636c00..d62fcd7c25 100644
--- a/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
@@ -15,7 +15,7 @@ public abstract class ActuatorComponent : MonoBehaviour
public abstract IActuator CreateActuator();
///
- /// The specification of the Action space for this ActuatorComponent.
+ /// The specification of the possible actions for this ActuatorComponent.
/// This must produce the same results as the corresponding IActuator's ActionSpec.
///
///
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
index 72e62dab4f..d85424f171 100644
--- a/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
@@ -73,7 +73,7 @@ void ReadyActuatorsForExecution()
}
///
- /// This method validates that all s have unique names and equivalent action space types
+ /// This method validates that all s have unique names
/// if the `DEBUG` preprocessor macro is defined, and allocates the appropriate buffers to manage the actions for
/// all of the s that may live on a particular object.
///
@@ -90,7 +90,6 @@ internal void ReadyActuatorsForExecution(IList actuators, int numCont
}
#if DEBUG
// Make sure the names are actually unique
- // Make sure all Actuators have the same SpaceType
ValidateActuators();
#endif
@@ -272,7 +271,7 @@ void SortActuators()
}
///
- /// Validates that the IActuators managed by this object have unique names and equivalent action space types.
+ /// Validates that the IActuators managed by this object have unique names.
/// Each Actuator needs to have a unique name in order for this object to ensure that the storage of action
/// buffers, and execution of Actuators remains deterministic across different sessions of running.
///
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
index 4736e34c2b..263898f900 100644
--- a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
+++ b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
@@ -121,6 +121,7 @@ public void Clear()
///
/// Check if the is empty.
///
+ /// Whether the buffers are empty.
public bool IsEmpty()
{
return ContinuousActions.IsEmpty() && DiscreteActions.IsEmpty();
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActuator.cs b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs
index cf7f43c87b..9a97771060 100644
--- a/com.unity.ml-agents/Runtime/Actuators/IActuator.cs
+++ b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs
@@ -6,7 +6,7 @@ namespace Unity.MLAgents.Actuators
public interface IActuator : IActionReceiver
{
///
- /// The specification of the Action space for this IActuator.
+ /// The specification of the actions for this IActuator.
///
///
ActionSpec ActionSpec { get; }
diff --git a/com.unity.ml-agents/Runtime/Agent.cs b/com.unity.ml-agents/Runtime/Agent.cs
index f28c88a5d9..b10539f35b 100644
--- a/com.unity.ml-agents/Runtime/Agent.cs
+++ b/com.unity.ml-agents/Runtime/Agent.cs
@@ -19,9 +19,9 @@ namespace Unity.MLAgents
internal struct AgentInfo
{
///
- /// Keeps track of the last vector action taken by the Brain.
+ /// Keeps track of the last actions taken by the Brain.
///
- public ActionBuffers storedVectorActions;
+ public ActionBuffers storedActions;
///
/// For discrete control, specifies the actions that the agent cannot take.
@@ -52,17 +52,17 @@ internal struct AgentInfo
public void ClearActions()
{
- storedVectorActions.Clear();
+ storedActions.Clear();
}
public void CopyActions(ActionBuffers actionBuffers)
{
- var continuousActions = storedVectorActions.ContinuousActions;
+ var continuousActions = storedActions.ContinuousActions;
for (var i = 0; i < actionBuffers.ContinuousActions.Length; i++)
{
continuousActions[i] = actionBuffers.ContinuousActions[i];
}
- var discreteActions = storedVectorActions.DiscreteActions;
+ var discreteActions = storedActions.DiscreteActions;
for (var i = 0; i < actionBuffers.DiscreteActions.Length; i++)
{
discreteActions[i] = actionBuffers.DiscreteActions[i];
@@ -438,7 +438,7 @@ public void LazyInitialize()
InitializeSensors();
}
- m_Info.storedVectorActions = new ActionBuffers(
+ m_Info.storedActions = new ActionBuffers(
new float[m_ActuatorManager.NumContinuousActions],
new int[m_ActuatorManager.NumDiscreteActions]
);
@@ -557,7 +557,7 @@ void NotifyAgentDone(DoneReason doneReason)
m_CumulativeReward = 0f;
m_RequestAction = false;
m_RequestDecision = false;
- m_Info.storedVectorActions.Clear();
+ m_Info.storedActions.Clear();
}
///
@@ -886,12 +886,22 @@ public virtual void Initialize() { }
///
public virtual void Heuristic(in ActionBuffers actionsOut)
{
+ var brainParams = m_PolicyFactory.BrainParameters;
+ var actionSpec = brainParams.ActionSpec;
+ // For continuous and discrete actions together, we don't need to fall back to the legacy method
+ if (actionSpec.NumContinuousActions > 0 && actionSpec.NumDiscreteActions > 0)
+ {
+ Debug.LogWarning("Heuristic method called but not implemented. Clearing ActionBuffers.");
+ actionsOut.Clear();
+ return;
+ }
+
// Disable deprecation warnings so we can call the legacy overload.
#pragma warning disable CS0618
// The default implementation of Heuristic calls the
// obsolete version for backward compatibility
- switch (m_PolicyFactory.BrainParameters.VectorActionSpaceType)
+ switch (brainParams.VectorActionSpaceType)
{
case SpaceType.Continuous:
Heuristic(actionsOut.ContinuousActions.Array);
@@ -1038,7 +1048,7 @@ void SendInfoToBrain()
CollectObservations(collectObservationsSensor);
}
}
- using (TimerStack.Instance.Scoped("CollectDiscreteActionMasks"))
+ using (TimerStack.Instance.Scoped("WriteActionMask"))
{
m_ActuatorManager.WriteActionMask();
}
@@ -1135,7 +1145,7 @@ public ReadOnlyCollection GetObservations()
}
///
- /// Implement `CollectDiscreteActionMasks()` to collects the masks for discrete
+ /// Implement `WriteDiscreteActionMask()` to collect the masks for discrete
/// actions. When using discrete actions, the agent will not perform the masked
/// action.
///
@@ -1144,7 +1154,7 @@ public ReadOnlyCollection GetObservations()
///
///
/// When using Discrete Control, you can prevent the Agent from using a certain
- /// action by masking it with .
+ /// action by masking it with .
///
/// See [Agents - Actions] for more information on masking actions.
///
@@ -1168,30 +1178,29 @@ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
/// on the provided action.
///
///
- /// An action is passed to this function in the form of an array vector. Your
- /// implementation must use the array to direct the agent's behavior for the
+ /// An action is passed to this function in the form of an .
+ /// Your implementation must use the array to direct the agent's behavior for the
/// current step.
///
- /// You decide how many elements you need in the action array to control your
+ /// You decide how many elements you need in the ActionBuffers to control your
/// agent and what each element means. For example, if you want to apply a
/// force to move an agent around the environment, you can arbitrarily pick
- /// three values in the action array to use as the force components. During
- /// training, the agent's policy learns to set those particular elements of
+ /// three values in ActionBuffers.ContinuousActions array to use as the force components.
+ /// During training, the agent's policy learns to set those particular elements of
/// the array to maximize the training rewards the agent receives. (Of course,
/// if you implement a function, it must use the same
/// elements of the action array for the same purpose since there is no learning
/// involved.)
///
- /// Actions for an agent can be either *Continuous* or *Discrete*. Specify which
- /// type of action space an agent uses, along with the size of the action array,
- /// in the of the agent's associated
+ /// An Agent can use continuous and/or discrete actions. Configure this along with the size
+ /// of the action array, in the of the agent's associated
/// component.
///
- /// When an agent uses the continuous action space, the values in the action
+ /// When an agent uses continuous actions, the values in the ActionBuffers.ContinuousActions
/// array are floating point numbers. You should clamp the values to the range,
/// -1..1, to increase numerical stability during training.
///
- /// When an agent uses the discrete action space, the values in the action array
+ /// When an agent uses discrete actions, the values in the ActionBuffers.DiscreteActions array
/// are integers that each represent a specific, discrete action. For example,
/// you could define a set of discrete actions such as:
///
@@ -1204,8 +1213,8 @@ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
///
///
/// When making a decision, the agent picks one of the five actions and puts the
- /// corresponding integer value in the action vector. For example, if the agent
- /// decided to move left, the action vector parameter would contain an array with
+ /// corresponding integer value in the ActionBuffers.DiscreteActions array. For example, if the agent
+ /// decided to move left, the ActionBuffers.DiscreteActions parameter would be an array with
/// a single element with the value 1.
///
/// You can define multiple sets, or branches, of discrete actions to allow an
@@ -1213,15 +1222,14 @@ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
/// use one branch for movement and another branch for throwing a ball left, right,
/// up, or down, to allow the agent to do both in the same step.
///
- /// The action vector of a discrete action space contains one element for each
- /// branch. The value of each element is the integer representing the chosen
- /// action for that branch. The agent always chooses one action for each
- /// branch.
+ /// The ActionBuffers.DiscreteActions array of an agent with discrete actions contains one
+ /// element for each branch. The value of each element is the integer representing the
+ /// chosen action for that branch. The agent always chooses one action for each branch.
///
- /// When you use the discrete action space, you can prevent the training process
+ /// When you use discrete actions, you can prevent the training process
/// or the neural network model from choosing specific actions in a step by
- /// implementing the
- /// function. For example, if your agent is next to a wall, you could mask out any
+ /// implementing the
+ /// method. For example, if your agent is next to a wall, you could mask out any
/// actions that would result in the agent trying to move into the wall.
///
/// For more information about implementing agent actions see [Agents - Actions].
@@ -1233,6 +1241,14 @@ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
///
public virtual void OnActionReceived(ActionBuffers actions)
{
+ var actionSpec = m_PolicyFactory.BrainParameters.ActionSpec;
+ // For continuous and discrete actions together, we don't need to fall back to the legacy method
+ if (actionSpec.NumContinuousActions > 0 && actionSpec.NumDiscreteActions > 0)
+ {
+ // Nothing implemented.
+ return;
+ }
+
if (!actions.ContinuousActions.IsEmpty())
{
m_LegacyActionCache = actions.ContinuousActions.Array;
diff --git a/com.unity.ml-agents/Runtime/Agent.deprecated.cs b/com.unity.ml-agents/Runtime/Agent.deprecated.cs
index 582882637f..514fe7218f 100644
--- a/com.unity.ml-agents/Runtime/Agent.deprecated.cs
+++ b/com.unity.ml-agents/Runtime/Agent.deprecated.cs
@@ -42,7 +42,14 @@ public virtual void OnActionReceived(float[] vectorAction) { }
[Obsolete("GetAction has been deprecated, please use GetStoredActionBuffers instead.")]
public float[] GetAction()
{
- var storedAction = m_Info.storedVectorActions;
+ var actionSpec = m_PolicyFactory.BrainParameters.ActionSpec;
+ // For continuous and discrete actions together, this shouldn't be called because we can only return one.
+ if (actionSpec.NumContinuousActions > 0 && actionSpec.NumDiscreteActions > 0)
+ {
+ Debug.LogWarning("Agent.GetAction() should not be called when both continuous and discrete actions are in use. Use Agent.GetStoredActionBuffers() instead.");
+ }
+
+ var storedAction = m_Info.storedActions;
if (!storedAction.ContinuousActions.IsEmpty())
{
return storedAction.ContinuousActions.Array;
diff --git a/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs
index 0b509eb491..bb19532478 100644
--- a/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs
+++ b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs
@@ -84,7 +84,7 @@ public static bool IsAnalyticsEnabled()
/// The BehaviorName of the Agent using the model
/// Whether inference is being performed on the CPU or GPU
/// List of ISensors for the Agent. Used to generate information about the observation space.
- /// ActionSpec for the Agent. Used to generate information about the action space.
+ /// ActionSpec for the Agent. Used to generate information about the actions.
///
public static void InferenceModelSet(
NNModel nnModel,
diff --git a/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
index 28a79977f9..91c3c87f54 100644
--- a/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
+++ b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
@@ -29,15 +29,15 @@ public static AgentInfoActionPairProto ToInfoActionPairProto(this AgentInfo ai)
var agentActionProto = new AgentActionProto();
- if (!ai.storedVectorActions.IsEmpty())
+ if (!ai.storedActions.IsEmpty())
{
- if (!ai.storedVectorActions.ContinuousActions.IsEmpty())
+ if (!ai.storedActions.ContinuousActions.IsEmpty())
{
- agentActionProto.ContinuousActions.AddRange(ai.storedVectorActions.ContinuousActions.Array);
+ agentActionProto.ContinuousActions.AddRange(ai.storedActions.ContinuousActions.Array);
}
- if (!ai.storedVectorActions.DiscreteActions.IsEmpty())
+ if (!ai.storedActions.DiscreteActions.IsEmpty())
{
- agentActionProto.DiscreteActions.AddRange(ai.storedVectorActions.DiscreteActions.Array);
+ agentActionProto.DiscreteActions.AddRange(ai.storedActions.DiscreteActions.Array);
}
}
@@ -126,7 +126,7 @@ public static BrainParametersProto ToProto(this BrainParameters bp, string name,
/// Converts an ActionSpec into to a Protobuf BrainInfoProto so it can be sent.
///
/// The BrainInfoProto generated.
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
/// The name of the brain.
/// Whether or not the Brain is training.
public static BrainParametersProto ToBrainParametersProto(this ActionSpec actionSpec, string name, bool isTraining)
@@ -168,8 +168,8 @@ public static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
ActionSpec actionSpec;
if (bpp.ActionSpec == null)
{
- var spaceType = (SpaceType)bpp.VectorActionSpaceTypeDeprecated;
- if (spaceType == SpaceType.Continuous)
+ var spaceType = bpp.VectorActionSpaceTypeDeprecated;
+ if (spaceType == SpaceTypeProto.Continuous)
{
actionSpec = ActionSpec.MakeContinuous(bpp.VectorActionSizeDeprecated.ToArray()[0]);
}
diff --git a/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
index 78c0368437..109d31b2ee 100644
--- a/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
+++ b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
@@ -139,7 +139,7 @@ internal interface ICommunicator : IDisposable
/// Registers a new Brain to the Communicator.
///
/// The name or key uniquely identifying the Brain.
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
void SubscribeBrain(string name, ActionSpec actionSpec);
///
diff --git a/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
index 20165bf417..ea0aa49e69 100644
--- a/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
+++ b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
@@ -173,7 +173,7 @@ public UnityRLInitParameters Initialize(CommunicatorInitParameters initParameter
/// Adds the brain to the list of brains which will be sending information to External.
///
/// Brain key.
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
public void SubscribeBrain(string brainKey, ActionSpec actionSpec)
{
if (m_BehaviorNames.Contains(brainKey))
diff --git a/com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs b/com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs
index 6ea7e9cc13..e3564afd9c 100644
--- a/com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs
+++ b/com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs
@@ -275,13 +275,13 @@ public static string DiscreteOutputName(this Model model)
}
///
- /// Check if the model supports hybrid action spaces.
+ /// Check if the model supports both continuous and discrete actions.
/// If not, the model should be handled differently and use the deprecated fields.
///
///
/// The Barracuda engine model for loading static parameters.
///
- /// True if the model supports hybrid action spaces.
+ /// True if the model supports both continuous and discrete actions.
public static bool SupportsContinuousAndDiscrete(this Model model)
{
return model == null ||
diff --git a/com.unity.ml-agents/Runtime/Inference/GeneratorImpl.cs b/com.unity.ml-agents/Runtime/Inference/GeneratorImpl.cs
index 6f3a723478..d89f378330 100644
--- a/com.unity.ml-agents/Runtime/Inference/GeneratorImpl.cs
+++ b/com.unity.ml-agents/Runtime/Inference/GeneratorImpl.cs
@@ -268,7 +268,7 @@ public void Generate(TensorProxy tensorProxy, int batchSize, IEnumerable
/// The Barracuda model to load
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
/// Inference execution device. CPU is the fastest
/// option for most of ML Agents models.
/// The seed that will be used to initialize the RandomNormal
diff --git a/com.unity.ml-agents/Runtime/Inference/TensorApplier.cs b/com.unity.ml-agents/Runtime/Inference/TensorApplier.cs
index 6ad0ccfdae..3b3a2bf98b 100644
--- a/com.unity.ml-agents/Runtime/Inference/TensorApplier.cs
+++ b/com.unity.ml-agents/Runtime/Inference/TensorApplier.cs
@@ -40,7 +40,7 @@ public interface IApplier
///
/// Returns a new TensorAppliers object.
///
- /// Description of the action spaces for the Agent.
+ /// Description of the actions for the Agent.
/// The seed the Appliers will be initialized with.
/// Tensor allocator
/// Dictionary of AgentInfo.id to memory used to pass to the inference model.
diff --git a/com.unity.ml-agents/Runtime/Policies/BrainParameters.cs b/com.unity.ml-agents/Runtime/Policies/BrainParameters.cs
index d0375d6d90..fde0d04a12 100644
--- a/com.unity.ml-agents/Runtime/Policies/BrainParameters.cs
+++ b/com.unity.ml-agents/Runtime/Policies/BrainParameters.cs
@@ -6,8 +6,9 @@
namespace Unity.MLAgents.Policies
{
///
- /// Whether the action space is discrete or continuous.
+ /// This is deprecated. Agents can now use both continuous and discrete actions together.
///
+ [Obsolete("Continuous and discrete actions on the same Agent are now supported; see ActionSpec.")]
public enum SpaceType
{
///
@@ -55,7 +56,7 @@ public class BrainParameters : ISerializationCallbackReceiver
internal ActionSpec m_ActionSpec = new ActionSpec(0, null);
///
- /// The specification of the Action space for the BrainParameters.
+ /// The specification of the Actions for the BrainParameters.
///
public ActionSpec ActionSpec
{
@@ -69,14 +70,14 @@ public ActionSpec ActionSpec
}
///
- /// (Deprecated) The size of the action space.
+ /// (Deprecated) The number of possible actions.
///
/// The size specified is interpreted differently depending on whether
- /// the agent uses the continuous or the discrete action space.
+ /// the agent uses the continuous or the discrete actions.
///
- /// For the continuous action space: the length of the float vector that represents
+ /// For the continuous actions: the length of the float vector that represents
/// the action.
- /// For the discrete action space: the number of branches in the action space.
+ /// For the discrete actions: the number of branches.
///
[Obsolete("VectorActionSize has been deprecated, please use ActionSpec instead.")]
[FormerlySerializedAs("vectorActionSize")]
diff --git a/com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs b/com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs
index e7bb11d243..5fe6ee6485 100644
--- a/com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs
+++ b/com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs
@@ -64,6 +64,18 @@ public void TestEnsureBufferDiscrete()
Assert.IsTrue(7 == manager.StoredActions.DiscreteActions.Length);
}
+ [Test]
+ public void TestAllowMixedActions()
+ {
+ // Make sure discrete + continuous actuators are allowed.
+ var manager = new ActuatorManager();
+ var actuator1 = new TestActuator(ActionSpec.MakeDiscrete(new[] { 1, 2, 3, 4 }), "actuator1");
+ var actuator2 = new TestActuator(ActionSpec.MakeContinuous(3), "actuator2");
+ manager.Add(actuator1);
+ manager.Add(actuator2);
+ manager.ReadyActuatorsForExecution(new[] { actuator1, actuator2 }, 3, 10, 4);
+ }
+
[Test]
public void TestFailOnSameActuatorName()
{
diff --git a/com.unity.ml-agents/Tests/Editor/DemonstrationTests.cs b/com.unity.ml-agents/Tests/Editor/DemonstrationTests.cs
index 34911e478a..46394a51b3 100644
--- a/com.unity.ml-agents/Tests/Editor/DemonstrationTests.cs
+++ b/com.unity.ml-agents/Tests/Editor/DemonstrationTests.cs
@@ -69,7 +69,7 @@ public void TestStoreInitialize()
done = true,
episodeId = 5,
maxStepReached = true,
- storedVectorActions = new ActionBuffers(null, new int[] { 0, 1 }),
+ storedActions = new ActionBuffers(null, new int[] { 0, 1 }),
};
diff --git a/com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorGenerator.cs b/com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorGenerator.cs
index 51e0d70276..57436e1d67 100644
--- a/com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorGenerator.cs
+++ b/com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorGenerator.cs
@@ -47,13 +47,13 @@ static List GetFakeAgents(ObservableAttributeOptions observableAttrib
var infoA = new AgentInfo
{
- storedVectorActions = new ActionBuffers(null, new[] { 1, 2 }),
+ storedActions = new ActionBuffers(null, new[] { 1, 2 }),
discreteActionMasks = null,
};
var infoB = new AgentInfo
{
- storedVectorActions = new ActionBuffers(null, new[] { 3, 4 }),
+ storedActions = new ActionBuffers(null, new[] { 3, 4 }),
discreteActionMasks = new[] { true, false, false, false, false },
};