Search Unity

Transforming Daydream controller input into a normalised 2D space

Discussion in 'Daydream' started by Nickel0re, Jan 17, 2018.

  1. Nickel0re

    Nickel0re

    Joined:
    Jan 17, 2018
    Posts:
    2
    I'm currently trying to find out whether it's possible to transform Daydream controller input into 2D space. The tool I want to make use of is "Glyph recognition tool". Its developer was kind enough to answer my question about whether it's possible to use it with VR-based controllers with this....


    Code (CSharp):
    1.  
    2. using UnityEngine;
    3. using UnityEngine.UI;
    4. using UnityEngine.Events;
    5. using UnityEngine.EventSystems;
    6. using System.Collections.Generic;
    7. using System;
    8. namespace AdVd.GlyphRecognition
    9. {
    10.     /// <summary>
    11.     /// UI component to draw glyphs and find the closest match within a set of stored glyphs using a specific matching method.
    12.     /// </summary>
    13.     [RequireComponent(typeof(RectTransform), typeof(Image))]
    14.     public class GlyphDrawInput : MonoBehaviour, IBeginDragHandler, IDragHandler, IEndDragHandler, IPointerClickHandler {
    15.         [...]
    16.         Vector2 prevPos;
    17.         bool RectEventPoint(Vector2 position, Camera pressEventCamera, out Vector2 localPoint){ // This method returns the normalized position of the pointer and whether that position is inside the drawing area
    18.             RectTransform rt =  transform as RectTransform;
    19.             Rect r = rt.rect;
    20.             RectTransformUtility.ScreenPointToLocalPointInRectangle(rt, position, pressEventCamera, out localPoint);
    21.             localPoint-=r.center;
    22.             localPoint.x/=r.width*normalizedGlyphSize; localPoint.y/=r.height*normalizedGlyphSize;
    23.             return RectTransformUtility.RectangleContainsScreenPoint(rt, position, pressEventCamera);
    24.         }
    25.         public void OnBeginDrag (PointerEventData eventData)
    26.         {
    27.             if (eventData.button!=PointerEventData.InputButton.Left) return;
    28.             stroke=new List<Vector2>();
    29.             Vector2 localPoint;
    30.             if (RectEventPoint(eventData.pressPosition, eventData.pressEventCamera, out localPoint)) stroke.Add (prevPos=localPoint);
    31.         }
    32.         public void OnDrag (PointerEventData eventData)
    33.         {
    34.             if (eventData.button!=PointerEventData.InputButton.Left) return;
    35.             if (stroke!=null){
    36.                 Vector2 currPos;
    37.                 if (RectEventPoint(eventData.position, eventData.pressEventCamera, out currPos)){
    38.                     if (sampleDistance<Stroke.minSampleDistance){//No resample
    39.                         stroke.Add(currPos);
    40.                     }
    41.                     else{//Resample
    42.                         Vector2 dir=(currPos-prevPos);
    43.                         float dist=dir.magnitude;
    44.                         if (dist>0) dir/=dist;
    45.                         while(dist>sampleDistance){ // Sample the stroke with stable rate
    46.                             Vector2 point=prevPos+dir*sampleDistance;
    47.                             stroke.Add (point);
    48.                             prevPos=point;
    49.                             dist-=sampleDistance;
    50.                         }
    51.                     }
    52.                     if (OnPointDraw!=null){
    53.                         Vector2[] points=new Vector2[stroke.Count+1];
    54.                         stroke.CopyTo(points); points[points.Length-1]=currPos;
    55.                         OnPointDraw(points);
    56.                     }
    57.                 }
    58.             }
    59.         }
    60.         public void OnEndDrag (PointerEventData eventData)
    61.         {
    62.             if (eventData.button!=PointerEventData.InputButton.Left) return;
    63.             if (stroke!=null){
    64.                 if (stroke.Count<2){
    65.                     stroke=null;
    66.                     if (OnPointDraw!=null) OnPointDraw(null);
    67.                     return;
    68.                 }
    69.                 Vector2 currPos;
    70.                 if (RectEventPoint(eventData.position, eventData.pressEventCamera, out currPos)) stroke.Add(currPos);
    71.                 if (strokeList==null) strokeList=new List<Stroke>();
    72.                 Stroke newStroke=new Stroke(stroke.ToArray());
    73.                 strokeList.Add(newStroke);
    74.                 stroke=null;
    75.                 if (OnStrokeDraw!=null) OnStrokeDraw(strokeList.ToArray());
    76.             }
    77.         }
    78.         public void OnPointerClick (PointerEventData eventData)
    79.         {
    80.             if (eventData.button!=PointerEventData.InputButton.Left) return;
    81.             if (stroke==null && castOnTap){
    82.                 Cast();//This tells the GlyphDrawInput to run the recognition algorithm
    83.             }
    84.         }
    85.         [...]
    86.     }
    87. }
    88.  
    I'm not sure about how this can be done or whether it can be done at all with Gvr. What I want to achieve is a canvas in a 3d space upon which the input will be made, i.e. the pointer will essentially be the drawing tool for this particular canvas gameobject. I have seen quite a few drawing app demos in daydream and I've read a bit about the $P and $1 recognition tools http://depts.washington.edu/madlab/proj/dollar/ but I'm still lost here. Any help would be greatly appreciated.
     
  2. reedny

    reedny

    Joined:
    Mar 4, 2017
    Posts:
    57
    You can Raycast between the camera (person) and Controller reticle (or brush position):
    Code (CSharp):
    1. Vector3 direction = transforms.BrushPosition - transforms.CameraPosition;
    2. RaycastHit hitInfo;
    3. bool b = Physics.Raycast(transforms.CameraPosition, direction, out hitInfo, Mathf.Infinity, layerMask);
    This will work if the canvas is a 3D Unity object, like a flattened cube.
     
    Nickel0re likes this.
  3. Nickel0re

    Nickel0re

    Joined:
    Jan 17, 2018
    Posts:
    2
    Thanks a lot for replying!

    As far as I understood from the video
    the drawing is done on an actual "canvas" 2D gameobject. Would this imply that if I place it in a 3D world perspective and attach it to the flattened cube, for example, and then raycast to the "canvas" gameobject with an event trigger enabled, the input should be accepted, provided I modify the input handlers?

    The developer mentioned "IBeginDragHandler, IDragHandler, IEndDragHandler, IPointerClickHandler", what would be the appropriate substitutes apart from standard input classes and event triggers like: GvrPointerInputModuleImpl, GvrAllEventsTrigger, GvrEventExecutor, GvrPointerInputModule and such?

    Additionally, why would I raycast from the player camera itself, but not from the controller?
     
  4. jmitcheson

    jmitcheson

    Joined:
    Oct 21, 2016
    Posts:
    96
    I've never used it, but what is this thing inside the engine itself? Can it help?
     
  5. reedny

    reedny

    Joined:
    Mar 4, 2017
    Posts:
    57
    If it's a Unity UI Canvas (2D) then you can use a GvrPointerGraphicRaycaster on the Canvas and the UI elements on that Canvas will receive standard Unity UI Input when the Controller reticle touches them. See the GVR demo scene for an example.
     
    Nickel0re likes this.