Search Unity

  1. Unity support for visionOS is now available. Learn more in our blog post.
    Dismiss Notice

Bug ML agents python DOES NOT WANT TO WORK AT ALL

Discussion in 'ML-Agents' started by unitydev199, Jul 30, 2023.

  1. unitydev199

    unitydev199

    Joined:
    Feb 28, 2021
    Posts:
    5
    Before I start, I just want to mention how DIFFICULT it was to set up the ML-Agents python package. I had to install an older version of Python, use specific versions of various other things, etc. I don't think I have ever had such a headache just to install a python package, jesus!

    Anyways, when I don't run the CMD as admin it throws a PermissionError:
    [WinError 5] Access is denied
    , so I run it as an admin. But when I finally got to training my agents, I got the error:
    ValueError: semaphore or lock released too many times
    .

    I can't fix this. I'm using python v 3.7.2, MLAgents package is 0.29.0, mlagentenvs 0.29.0.

    Not sure why it should make a huge difference but here is my c# agent code (runs fine in heuristic):
    Code (CSharp):
    1. using UnityEngine;
    2. using Unity.MLAgents;
    3. using Unity.MLAgents.Sensors;
    4. using Unity.MLAgents.Actuators;
    5. using System;
    6.  
    // Team affiliation for a Bean agent; also identifies which side scored
    // when passed to Bean.AddGoalReward.
    public enum Team
    {
        Red = 0,
        Blue = 1
    }
    12.  
    /// <summary>
    /// A 2v2 soccer-style ML-Agents agent ("bean"). Observes the world
    /// positions of itself, its teammate, both opponents, the ball, and the
    /// two goals (7 Vector3s = 21 floats, so the Behavior Parameters vector
    /// observation size must be 21), and moves via a CharacterController
    /// driven by two continuous actions.
    /// </summary>
    public class Bean : Agent
    {
        [SerializeField] private float moveSpeed = 5f;
        private CharacterController characterController;
        private Vector3 moveDirection;

        [SerializeField] private Team team;
        [SerializeField] private Transform TeamMate;
        [SerializeField] private Transform OpposingBean1;
        [SerializeField] private Transform OpposingBean2;
        [SerializeField] private Transform ball;
        [SerializeField] private Transform goal1;
        [SerializeField] private Transform goal2;

        // Position the agent is reset to at the start of each episode.
        [SerializeField] private Vector3 AgentStartPos;

        /// <summary>
        /// Agent.Initialize runs once before the first episode and before any
        /// OnActionReceived call. Using it instead of Start() guarantees the
        /// CharacterController is cached before the trainer sends actions;
        /// with Start(), the first actions of a training run could arrive
        /// while characterController was still null.
        /// </summary>
        public override void Initialize()
        {
            characterController = GetComponent<CharacterController>();
            Debug.Log("Start position: " + ball.position);
        }

        /// <summary>
        /// Resets the agent and the ball at the start of every episode.
        /// </summary>
        public override void OnEpisodeBegin()
        {
            Debug.Log("Episode started.");
            transform.position = AgentStartPos;
            ball.position = new Vector3(0, 1, 0);
        }

        /// <summary>
        /// Collects the 21-float observation vector: world positions of this
        /// agent, its teammate, both opponents, the ball, and both goals.
        /// NOTE(review): these are absolute world positions; observations
        /// relative to the agent usually train faster — worth trying if
        /// learning stalls.
        /// </summary>
        public override void CollectObservations(VectorSensor sensor)
        {
            Debug.Log("Collecting observations.");
            sensor.AddObservation(transform.position);
            // The fields are already Transforms; the original's ".transform"
            // indirection was redundant.
            sensor.AddObservation(TeamMate.position);
            sensor.AddObservation(OpposingBean1.position);
            sensor.AddObservation(OpposingBean2.position);
            sensor.AddObservation(ball.position);
            sensor.AddObservation(goal1.position);
            sensor.AddObservation(goal2.position);
        }

        /// <summary>
        /// Applies the two continuous actions (strafe, forward) as planar
        /// movement through the CharacterController.
        /// </summary>
        public override void OnActionReceived(ActionBuffers actions)
        {
            float horizontalMovement = actions.ContinuousActions[0];
            float verticalMovement = actions.ContinuousActions[1];

            // Normalize so diagonal input is not faster than axis-aligned
            // input; TransformDirection maps the local input direction into
            // world space.
            moveDirection = transform.TransformDirection(new Vector3(horizontalMovement, 0f, verticalMovement).normalized);

            // Move the agent using the CharacterController (ensure characterController is not null)
            if (characterController != null)
            {
                characterController.SimpleMove(moveDirection * moveSpeed);
                Debug.Log("Moving: " + moveDirection);
            }
            else
            {
                Debug.LogWarning("CharacterController is null. Make sure it's assigned in the Inspector.");
            }
        }

        /// <summary>
        /// Rewards or penalizes this agent when a goal is scored, then ends
        /// the episode. <paramref name="rewardTeam"/> is the team that scored.
        /// BUG FIX: the original logged the wrong scorer in the losing
        /// branches (e.g. "Red team scored" when Blue had scored against a
        /// Red agent) — the scorer is always rewardTeam regardless of which
        /// team this agent is on.
        /// </summary>
        public void AddGoalReward(Team rewardTeam)
        {
            bool myTeamScored = team == rewardTeam;
            AddReward(myTeamScored ? 2f : -2f);
            EndEpisode();
            Debug.Log(rewardTeam + " team scored. Reward: " + (myTeamScored ? "+2" : "-2"));
        }

        /// <summary>
        /// Heuristic (manual) control: maps the Horizontal/Vertical input
        /// axes onto the two continuous actions so the agent can be driven
        /// with the arrow keys while Behavior Type is Heuristic Only.
        /// </summary>
        public override void Heuristic(in ActionBuffers actionsOut)
        {
            ActionSegment<float> continuousActions = actionsOut.ContinuousActions;

            float horizontalInput = Input.GetAxis("Horizontal");
            float verticalInput = Input.GetAxis("Vertical");

            continuousActions[0] = horizontalInput;
            continuousActions[1] = verticalInput;

            Debug.Log("Heuristic - Horizontal: " + horizontalInput + ", Vertical: " + verticalInput);
        }
    }
    123.  

    Thanks in advance.
     
    JesseSTG likes this.
  2. GamerLordMat

    GamerLordMat

    Joined:
    Oct 10, 2019
    Posts:
    185
    Hello!
    I feel you, it was very tedious to install it for me too.

    I use Windows 11, release 20 of the mlagents package
    Version information:
    ml-agents: 0.30.0,
    ml-agents-envs: 0.30.0,
    Communicator API: 1.5.0,
    PyTorch: 2.0.1+cu118

    and Python 3.8.16 with Anaconda

    I had to uninstall and install some things manually too (the Barracuda library broke for me + reinstall some libraries)

    I don't know how to fix your issue though; the simplest fix would be just to reinstall it.