L3 – Group 16

Group 16:
Andrew
Brian
Kevin
Saswathi

Description

We built a miniature Pac-man that opens and closes its mouth using one servo motor while a second servo motor reels him along a wire strung with edible “ghosts,” allowing him to change position. We built this because one of our group members is an avid fan of Pac-Man and thought it would be awesome to see Pac-Man brought to life through motors. Overall, we think our project was a success. We like that our final result combined two motors to move Pac-man’s mouth and his position in tandem. We initially tried to control Pac-man’s position with a DC motor, since reeling in a line is a natural use of one. However, the DC motor provided to us ultimately did not have enough power to move Pac-man’s body correctly, so we had to use a servo motor instead. Given another chance to work on this project, we would probably get a more powerful DC motor to control Pac-man’s position more naturally.

Pacman

Brainstorm Ideas

  1. Garden plant on wheels that moves towards the light
  2. A caterpillar-like robot that is basically a motor going through the same motion, but lifting up to move forward
  3. A spider-like crawler with multiple motors moving each leg
  4. A car with 4 wheels
  5. A two-wheeler like a moped
  6. A two-wheeler like a Segway
  7. A servo motor to operate a rudder at the back of the robot and move the robot to its destination in water
  8. A robot that can operate a pulley and pull itself up
  9. A robot that will sense light and move away
  10. A cripple simulator that moves by twitching a limb using the servo motor
  11. A fan-powered car (the wheels are mobile, but it’s the fan that is actually motile and powered by a dc motor)
  12. A helicopter using motors as rotors
  13. Use a ton of wire to create a changing magnetic field to power our very own magnet train.
  14. A window washer that can ascend and descend vertical surfaces by winding up or out a string using a motor.
  15. A robot that will use a flex sensor to hit against a wall and “bounce” away when the flex sensor bends enough
  16. A morbid imitation goose (they have no gag reflex) that continually “swallows” a string and by doing so, achieves motion.
  17. Fish robot that you reel in.
  18. Robot that does the butterfly stroke on land
  19. A robot that hops, like a rabbit, with a motor winding a spring and then releasing it
  20. A robot that has wings to flap – like one of those birds that fly in a circle
  21. A snake-like robot that slithers
  22. Spider robot that moves up and down in midair as if on a strand of silk.
  23. A motorboat-like thing that swims around a bowl of water
  24. Pac-man moving towards a dot

Design Sketches

Circuit Sketch
Pulley Motion
Gobble Motion
Full Design

Final System

Pacman inside:
Inside

Parts

  • Arduino
  • 2 Servo motors
  • Tape
  • Plastic bowls
  • Wire
  • Paper

Instructions

1. Make Pac-man’s body

-Set up Pac-man’s body with the two plastic bowls: place the bowl for Pac-man’s bottom right side up, and place the bowl for Pac-man’s top upside down on top of it.

-Connect a servo motor’s three leads to 5V, ground, and an Arduino digital output pin for controlling his mouth.

-Tape the base of the motor to the inside rim of the bottom plastic bowl.

-Tape the moving knob of the servo motor to the inside rim of the top bowl. This will allow the top of Pac-man’s body to move up and down as if he were eating.

2. Make Pac-man move

-Connect another servo motor’s three leads to 5V, ground, and an Arduino digital output pin for controlling his movement.

-Create Pac-man’s reel by cutting a large length of wire and taping paper drawings of ghosts onto it.

-Securely attach one end of the reel to the knob of the servo motor. This will require tape and, in our case, a spool attached to the knob to increase its surface area.

-Securely tape the base of the servo motor onto the inner bottom of Pac-man’s lower bowl.

-Attach the other end of the reel to a static object, or your hand, so that the reeling action will displace the Pac-man body.

Source Code

/*
Group 16: Pac-man mover
(initial version: DC motor for position, servo for the mouth)
*/

#include <Servo.h>

int motorPin = 3;  // PWM pin driving the DC motor that reels Pac-man along the wire
int servoPin = 9;  // servo that opens and closes the mouth

Servo servo;

int angle = 0;  // servo position in degrees
int speed = 50; // speed (PWM duty cycle) of dc motor

void setup()
{
  pinMode(motorPin, OUTPUT);
  servo.attach(servoPin);

  // start pac-man's motion forward
  analogWrite(motorPin, speed);
}

void loop()
{
  delay(200);

  // now make him eat! sweep the mouth open...
  for (angle = 15; angle < 70; angle++) {
    servo.write(angle);
    delay(15);
  }

  // ...and closed again
  for (angle = 70; angle > 15; angle--) {
    servo.write(angle);
    delay(15);
  }
}

CODE 2 (final version, two servos):

/*
Adafruit Arduino - Lesson 14. Sweep
(adapted: final version with two servos)
*/

#include <Servo.h>

int servoPin = 9;   // mouth servo
int servo2Pin = 11; // reel servo controlling Pac-man's position

Servo servo;
Servo servo2;

int angle = 0;   // servo position in degrees
int pause = 45;  // ms between one-degree steps
int offset = 20; // minimum mouth angle

void setup()
{
  servo.attach(servoPin);
  servo2.attach(servo2Pin);
}

void loop()
{
  // now make him eat! While servo2 sweeps the reel from 0 to 180 degrees,
  // the mouth servo oscillates between offset and offset + 60 degrees,
  // alternately opening and closing every 30 degrees of reel travel.
  for (angle = 0; angle < 30; angle++) {
    servo.write(angle*2 + offset); // mouth opening
    servo2.write(angle);
    delay(pause);
  }

  for (angle = 30; angle < 60; angle++) {
    servo.write((60-angle)*2 + offset); // mouth closing
    servo2.write(angle);
    delay(pause);
  }

  for (angle = 60; angle < 90; angle++) {
    servo.write((angle-60)*2 + offset);
    servo2.write(angle);
    delay(pause);
  }

  for (angle = 90; angle < 120; angle++) {
    servo.write((120-angle)*2 + offset);
    servo2.write(angle);
    delay(pause);
  }

  for (angle = 120; angle < 150; angle++) {
    servo.write((angle-120)*2 + offset);
    servo2.write(angle);
    delay(pause);
  }

  for (angle = 150; angle < 180; angle++) {
    servo.write((180-angle)*2 + offset);
    servo2.write(angle);
    delay(pause);
  }
}

 

P3 – Epple (Group 16)

Group 16 – Epple

Member Names
Saswathi: Made the prototype & part of the final document
Kevin: Design idea & part of the final document
Brian: Large part of the final document
Andrew: Created the prototype environment & part of the final document

Mission Statement

The system being evaluated is titled the PORTAL. The Portal is an attempt at intuitive remote interaction, helping users separated by any distance to interact in as natural a manner as possible. Current interaction models like Skype, Google Hangouts, and FaceTime rely entirely on users to maintain a useful camera orientation, and afford each side no control over what they are seeing. We intend to naturalize camera control by implementing a video chatting feature that uses a Kinect to detect the orientation of the user and move a remote webcam accordingly. Meanwhile, the user looks at the camera feed through a mobile viewing screen, simulating the experience of looking through a movable window into a remote location. From this first evaluation of our prototype, we hope to learn ways to make controlling the webcam as natural as possible. Our team mission is to make an interface through which controlling web cameras is intuitive.

Description of Prototype

Our prototype uses a piece of cardboard with a square screen cut out of it as the mobile viewing screen. The user simply looks through the cut-out square to view the feed from a remote video camera. Through the feed, the user can view our prototype environment: a room with the people the user web chats with. These people can either be real human beings or, in some cases, printed images of human beings taped to the wall. We also have a prototype Kinect in the room, which is simply a decorated cardboard box.

Prototype in use. User keeps a subject in the portal frame by moving their own body.

Cardboard Kinect. Tracks user’s motion and moves the remote webcam accordingly.

Stand-in tablet. The portal through which the user views the remote location’s webcam feed.

 

Three Tasks

Task 1: Web chat while breaking the restriction of having the chat partner sit in front of the computer

Difficulty: Easy

Backstory:

A constant problem with web chats is the restriction that users must sit in front of the web camera to carry on the conversation; otherwise, the problem of off-screen speakers arises. With our system, if a chat partner moves out of the frame, the user can simply and intuitively move the camera view to follow them, eliminating the off-screen speaker problem. The conversation can then continue naturally.

How user interacts with prototype to test:

We have the user look through the screen to look at and talk to a target person. We have the person move around the room. The user must move the screen to keep the target within view while maintaining the conversation.

Saswathi is walking and talking. Normally this would be a problem for standard webcam setups. Not so for the Portal! Brian is able to keep Saswathi in the viewing frame at all times as if he were actually in the room with her, providing a more natural and personal conversation experience.

 


Task 2: Be able to search a distant location for a person through a web camera.

Difficulty: Medium

Backstory:

Another way in which web chat differs from physical interaction is the difference in the difficulty of initiation. While you might seek out a friend in Frist to initiate a conversation, in web chat, the best you can do is wait for said friend to get online. We intend to rectify this by allowing users to seek out friends in public spaces by searching with the camera, just as they would in person.

How user interacts with prototype to test:

The user plays a “Where’s Waldo?” game: various sketches of people are taped on the wall, and the user looks through the screen and moves it around until he is able to find the Waldo target.

After looking over a wall filled with various people and characters, the user has finally found Waldo above a door frame.

 


Task 3: Web chat with more than one person on the other side of the web camera.

Difficulty: Hard

Backstory:

A commonly observed problem with web chats is that even if there are multiple people on the other end, the chat is often limited to a one-on-one experience in which chat partners wait for their turn in front of the web camera or crowd together to appear in the frame. Users will want to use our system to web chat seamlessly with all partners at once. When the user wants to address another web chat partner, he intuitively changes the camera view to face that partner. This allows for dynamic, multi-way conversations not possible through normal web camera means.

How user interacts with prototype to test:

We have multiple people carrying on a conversation with the user. The user is able to view the speakers only through the screen. He must turn the screen in order to address a particular conversation partner.

The webcam originally faces Drew, but Brian wants to speak with Kevin. After turning a bit, he finally rotates the webcam enough so that Kevin is in the frame.


Discussion

The prototype is mainly meant to capture the user’s experience, so we have a portable display screen resembling an iPad, made from a cardboard box with a hole cut out for a screen. One can walk around with the mobile display and look through it at the environment. The Kinect is also modeled as a cardboard box with markings on it, placed in a convenient location where a real Kinect detecting user movement would sit. The prototype environment is made from printouts of various characters so that one can search for “Waldo.”

In creating our prototype, we found that the standard prototyping techniques of using paper and cardboard were versatile enough for our needs. It was difficult to replicate the feature of the camera following a scene until we hit upon the idea of simply creating an iPad “frame” through which we would pretend to be remotely viewing a place. Otherwise, the power of imagination made our prototype rather easy to make. We felt that our prototype worked well because it was natural, mobile, and easy to carry, and because it enhanced our interactions (there was literally nothing obstructing them). Even with vision restricted to a frame, we found that our interactions were not in any way impaired.

Lab 2 – Group 16

Group Members
Andrew Boik (aboik@)
Brian Huang (bwhuang@)
Kevin Lee (kevinlee@)
Saswathi Natta (snatta@)

Group 16

Description

We built three instruments: Instrument of Light, a light-detecting theremin built from a photoresistor; Lollipop (it kind of looks like one), a proximity theremin using a capacitive proximity sensor; and La Tromba, a trumpet built from three FSRs (for valves) and a flex sensor with a wind-sail attachment (for simulating airflow). All three worked fairly well, except that the trumpet was a little difficult to control and the proximity sensor didn’t have a very wide range. In all three, we used a buzzer for sound, except for the trumpet, which used a piezo element initially and then a buzzer in the final version. We ultimately decided to develop the trumpet into our final instrument. We were motivated to build this instrument because one of our group members plays trumpet, and we thought it would be interesting to create an electronic version of the instrument. Our final version featured a buzzer instead of a piezo element, and we tuned our flex sensor thresholds so less blowing/bending would be required. The combination of FSRs held down is mapped to actual trumpet notes, and the signal from the flex sensor simulates the partial, so bending it further results in a higher pitch. Overall, we think the final result was a success. We liked that we could actually (kind of) play a scale and go through the trumpet’s full range. Our wind-sail attachment to the flex sensor was a bit of a disappointment because it was nearly impossible to control the sound by blowing on it as you would on a real trumpet, so we ended up manually bending the flex sensor instead.

Video

La Tromba (initial)
Uses three FSRs as buttons (valves) for the trumpet and a flex sensor to sense when the user blows into the trumpet. Sound is only generated when the flex sensor is bent and certain FSRs are pressed.

La Tromba (initial) Video

Instrument of Light
Depending on the value that the photoresistor outputs, the tone that the buzzer plays will change.

Instrument of Light Video

Lollipop
Music by proximity: a capacitive proximity sensor controls which pitch the buzzer plays. The pitch is higher if a person is closer to the sensor.

Lollipop Video

La Tromba (final)

La Tromba (final) Video

Instructions

Instrument of Light (photoresistor theremin) – Wire a photocell as a voltage divider: one pin connects to 5V power, and the other pin connects to an Arduino analog input pin and through a 10k resistor to ground, so the Arduino can detect the changing resistance. Wire a buzzer with its longer lead to a chosen Arduino digital output pin and its shorter lead to ground.

La Tromba (FSR and flex sensor trumpet) – Wire 3 FSRs, each with a 10k pull-down resistor. Each combination of buttons produces a different tone. Wire a flex sensor with a 10k pull-down resistor inside a hollow tube; when it is bent by blown air, it allows the buzzer to sound a tone.

Lollipop (capacitive proximity sensor theremin) – Wire a capacitive proximity sensor, which is essentially a bit of aluminum foil, as in http://playground.arduino.cc//Main/CapacitiveSensor?from=Main.CapSense, and connect a buzzer to the Arduino to output sound depending on the proximity of the person.

 

Materials Used in Final Instrument

1 Arduino
1 Small breadboard
3 FSRs
1 Buzzer
1 Flex sensor
1 Paper wind sensor attachment to flex sensor
4 10K resistors

 

Code

La Tromba

Source Code Here
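
The original La Tromba source was posted via an external link. As a stand-in, here is a minimal sketch of the logic described in the Instructions above; the pin assignments, thresholds, and note table are illustrative assumptions, not the group’s actual code.

int fsrPins[] = {A0, A1, A2}; // assumed analog pins for the three "valve" FSRs
int flexPin = A3;             // assumed pin for the flex "airflow" sensor
int buzzerPin = 8;            // assumed buzzer pin

int fsrThreshold = 200; // assumed: a reading above this counts as a pressed valve
int flexLow = 360;      // assumed: a bend below this counts as no airflow

// One base frequency (Hz) per valve combination, indexed by the 3-bit
// pattern of pressed valves. Values are illustrative placeholders for
// the trumpet-note mapping the group tuned by hand.
int notes[8] = {523, 494, 466, 440, 415, 392, 370, 349};

void setup() {
  pinMode(buzzerPin, OUTPUT);
}

void loop() {
  // build a 3-bit valve combination from the FSR readings
  int combo = 0;
  for (int i = 0; i < 3; i++) {
    if (analogRead(fsrPins[i]) > fsrThreshold) {
      combo |= (1 << i);
    }
  }

  int flex = analogRead(flexPin);
  if (flex > flexLow) {
    // bending further raises the "partial": scale the base note up
    // in proportion to how far past the threshold the sensor is bent
    int partial = map(flex, flexLow, 1023, 1, 3);
    tone(buzzerPin, notes[combo] * partial);
  } else {
    noTone(buzzerPin); // no "air", no sound
  }
}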

Instrument of Light

int light_pin = A0;   // photocell voltage divider output
int speaker_pin = 8;  // buzzer pin

void setup() {
  Serial.begin(9600);
  pinMode(speaker_pin, OUTPUT);
}

void loop() {
  int reading = analogRead(light_pin); // 0-1023; brighter light = higher reading
  Serial.println(reading);
  tone(speaker_pin, reading); // use the raw reading directly as the frequency (Hz)
}

Lollipop

#include <CapacitiveSensor.h>
/*
 * CapacitiveSense Library Demo Sketch
 * Paul Badger 2008, edited 2013 by Princeton HCI lab group 16
 * Uses a high value resistor e.g. 10 megohm between send pin and receive pin
 * Resistor affects sensitivity; experiment with values, 50 kilohm - 50 megohm. Larger resistor values yield larger sensor values.
 * Receive pin is the sensor pin - try different amounts of foil/metal on this pin
 * Best results are obtained if sensor foil and wire is covered with an insulator such as paper or plastic sheet
 */
/* Modified by Lab Group 16 COS 436 HCI Spring 2013 */
CapacitiveSensor cs_4_2 = CapacitiveSensor(4,2); // 1 megohm resistor between pins 4 & 2; pin 2 is the sensor pin (add wire, foil)

long sensorValue;
long sensorHigh = 0;
long sensorLow = 2000;

void setup()
{
  cs_4_2.set_CS_AutocaL_Millis(0xFFFFFFFF); // turn off autocalibrate on channel 1 - just as an example
  Serial.begin(9600);

  // upon starting, we have five seconds to give the proximity
  // detector an upper and lower bound
  while (millis() < 5000) {
    sensorValue = cs_4_2.capacitiveSensor(30);
    if (sensorValue > sensorHigh) {
      sensorHigh = sensorValue;
    }
    if (sensorValue < sensorLow) {
      sensorLow = sensorValue;
    }
  }
  Serial.print("sensorHigh = ");
  Serial.println(sensorHigh);
  Serial.print("sensorLow = ");
  Serial.println(sensorLow);
}

void loop()
{
  sensorValue = cs_4_2.capacitiveSensor(30);

  // map the sensor values to a wide range of pitches
  int pitch = map(sensorValue, sensorLow, sensorHigh, 1047, 131);

  // play the tone on pin 8
  tone(8, pitch);
}

 

Assignment 2 – Andrew Boik

1. Observations

Observations were conducted outside near Friend Center and the COS building, in the Architecture building while waiting for COS 461 to start, and in Woolworth 106 before the start of class.

Notes:

1. MUS314 professor tries to hook up a computer to the projector and audio system – has difficulty getting everything set up correctly. This seems to be a common occurrence in many of my classes, and it seems to happen more often in specific rooms. Perhaps an app with instructions and troubleshooting steps for A/V setup would be appropriate here.

2. Student in COS 436 spends 10 minutes checking Facebook and email. This is a pretty generic use of time. An app could enhance this experience by combining posts from multiple social networks and email, or by summarizing and condensing the content into a few blocks short enough to read in 10 minutes. Or perhaps the student should really be doing homework, in which case an app that encourages the student to work or prevents the use of certain applications might be useful.

3. Student reads slides for today’s COS 461 lecture before it begins (in Architecture N101). This may be a good way to get a head start on the day’s class, or it may be a waste of time because the student needs extra explanation from the professor to understand what he/she is reading. Perhaps a better use of time would be to review the previous lecture’s material, and an app that lets users swap notes or tests them on concepts from the last lecture could be helpful.

4. Student running to Friend Center, late for class. This student may have lost track of time, in which case an app reminding them to go to class could be appropriate. Maybe they took an inefficient route, suggesting a use for a shortest-path directions app.

2. Brainstorming

  1. Desktop app to swap notes with classmates and review before class
  2. Mobile/web app to condense overview of news stories into a specified period of reading time (e.g. 10 minutes) for quick glance
  3. **Mobile app to digitize book/textbook using optical character recognition for reading on-the-go (i.e. going to class/ waiting for it to start), searching for text, and sending pages to friends
  4. Mobile/desktop game to wake up student’s brain to concentrate better during lecture
  5. Mobile/desktop app to provide feedback on lectures to the professor (could use the time before class to provide feedback on the last lecture or use it interactively throughout the lecture)
  6. Mobile study app to review key concepts from last lecture
  7. *Mobile app to let student know where their friends are sitting in a crowded lecture hall
  8. Mobile app to let student know how many seats are left in a lecture hall (to let them know if they need to hurry up to class or if they can slow down)
  9. Mobile/desktop app to calculate the time it will take a student to walk to their next class at their current rate using the most efficient route, letting them know if they need to walk faster or can afford to slow down/leave their current location later
  10. Troubleshooting app for audio/visual setup by a lecturer
  11. App that tells student whether or not they should go to class based on factors such as importance of material in lecture, student’s current workload, weather, student’s ability to stay awake in class, proximity of exams etc.
  12. App that allows teachers to remind students of any materials they need to bring to class
  13. App that delivers the best cat videos of the day to students for viewing before class
  14. App that annoys you if you try to do anything but homework while the app is running
  15. Mobile app that allows you to take pictures of people around you, find their name and Facebook stalk them

3. Ideas Chosen For Prototyping

My favorite ideas were the app to digitize textbooks and the app to let students know where their friends are sitting.

I liked the textbook app idea because sometimes I wish I had a few chapters of my textbook as a reference before or during class, but I hate having to carry a heavy textbook around.

I liked the find your friends app because a student could determine where their friends are sitting ahead of time, and if a student is late to class they can go straight to where their friend is upon entering the hall without having to spend time looking around.

 

4. Photos & Descriptions of Prototypes

Prototype 1 – Book Digitizer

Photo 2013-02-28 02.56.52

Upon opening the app, the user sees a screen with the digitized books currently stored by the app. In this case, there are none, so a button to add a new book is all that is displayed.

When the user presses the add button, a screen is displayed with options to edit the title and author of the new book and options to digitize the book with a smartphone camera or import it from a photo library.

In this case, the user presses the edit button next to the title and types in the title.

This is the screen displayed after the user presses the “Capture book with camera” button. Users can use the camera display to take photos of each page. Users can also edit the page number in the box below the camera button.

The user must wait for optical character recognition to complete and digitize the book.

Now we are back to the main menu, where a button for the new book is displayed.

Upon pressing the book’s button, the user is presented with a book menu that has several options: read the book from the beginning, select a chapter from the chapter list if entered by the user (feature not shown), search for terms, or send all or part of the book to a friend.

This is the book viewer screen, with buttons to return to the book menu, go back a page, go forward a page, and search for terms on the page.

This is the search screen, where the user searches for the word ‘serial’. A list of pages on which the term appears and a brief context in which it appears are displayed.

This is the sharing screen, where the user can send the entire book or a range of pages to a friend via email. Other possibilities could include sharing through social networking sites such as Facebook.

Prototype 2 – Friend Seat Finder

The main menu screen is displayed here. Users can press “Find seat near friends” to access the primary functionality of the app, press “Friends” to display a list of their friends, press “My profile” to display their profile with options for editing, and “Choose school” to select from a list of schools the app is designed for.

This screen displays the user’s profile with fields for name, class, major, and schedule. There is also an option to have a profile picture. Depending on the school chosen by the student, editing the schedule will present a list of classes currently being offered that semester that the user can add to their schedule.

This screen displays the user’s list of friends. People in this list have installed the app on their phone or desktop and have accepted the user’s friend request. Users can click on a friend to view their profile, and add/remove friends by selecting the friend from the list and pressing the ‘+’ or ‘-’ buttons, respectively.

This screen displays a friend’s profile. The friend’s name, class, major, profile pic, and schedule are displayed.

This is the add friend screen. Friends can be invited via email, selected from a list of contacts, or invited through Facebook or Google+.

This is where the action happens! SeatFinder keeps track of your schedule, and when it is almost time for class the app will display a map of the room in which the class is being held. The user can broadcast to their friends where they are sitting by clicking on a part of the seat map. The location of their friends is represented by a pin with a picture of the friend above it. The total number of friends in class is also displayed under the map.

 

5. User Testing

Notes:

User testing was done with the textbook digitizing app.

Testing was conducted with four users in various locations: two eating clubs, a room in Frist, and a dorm room. Users were all seniors majoring in Physics, Computer Science, or Ecology and Evolutionary Biology. Users were given a brief description of the app’s purpose, but no instructions on navigating the interface.

One user noted that there are apps similar to this, such as Genius Scan. However, it was noted that this app would also do optical character recognition and index terms to allow for advanced search capabilities.

Overall, users navigated successfully through the interface and used the prototype for its given purpose. Some users were confused as to what some of the screens were – for example, the screen that allows users to read through the textbook.

One user was confused by one of the buttons – the add new book button in the first screen – saying the design reminded them of the Red Cross.

Pictures:

Photo 2013-02-28 02.56.47

Liz scans a page of her textbook and sends it to Amma.

Photo 2013-02-28 02.56.40

Amma forgot her textbook, so the pages Amma receives are very helpful for reviewing before class.

Photo 2013-02-28 02.56.24

Sam scans pages from her notebook.

Photo 2013-02-28 02.56.19

Sam emails the pages to Ben.

Photo 2013-02-28 02.56.12

Ben receives Sam’s notes and is able to review them while eating.

 

6. Insights

Most of the confusion regarding the screens of the prototype seemed to stem from limitations of the prototype rather than from flaws in the app design itself. Perhaps text labels for each of the different screens would make the purpose of each screen clearer to the user, although in an actual app I believe each screen’s purpose would be readily apparent.

The confusion over the button design may stem from the low fidelity of the drawing and could possibly be mitigated by ensuring there is corresponding text with every button.

A tutorial may also be helpful in reducing confusion of the user.

Most of the interface of the prototype is similar to countless other apps. The target market for this app, college students, is mostly familiar with smartphone apps. The test users are all college seniors from a variety of majors, so they are most likely a good representation of the target demographic. Their success in using the app suggests a high probability that the target demographic will be able to use the app successfully.

P1 – Epple

Group 21 – Epple

Andrew Boik (aboik@)
Brian Huang (bwhuang@)
Kevin Lee (kevinlee@)
Saswathi Natta (snatta@)

Brainstorming

  1. Head tracking for use in dynamic crosstalk cancellation for 3D binaural audio
    IMG_1710
  2. [Expansion on previous idea] Moving speakers and display to sweet spot you as you move around the room
  3. Sign language to English translator
    IMG_1708

    1. can translate to different languages, not just English
    2. maybe as a design plan, put indicators on fingertips to make it easier to identify and track different finger positions ← can use this as a design alternate
    3. May also do flag semaphores.
      IMG_1701

      1. Could actually be really useful for ships
  4. [Expansion on previous idea] Sign language teacher ← similar to above but with error correction!
  5. Chair that alerts you when you have bad posture
    1. design would need lots of pressure sensors embedded in a chair – might be difficult mechanically for our group
  6. Table on wheels that follows you
  7. Take a picture from far away with a gesture instead of timer
  8. Device to help you read in bed
    1. Gestures or button to flip a page
    2. One hand controller might be easier and more comfortable
    3. Extensible to music (idea is basically for seeking through book without hands)
  9. Alarm that sounds if you look like you have fallen asleep while driving (or in the office)
  10. [Expansion on previous idea] Alarm clock that will ring if you stay or go back to bed
  11. [Expansion on previous idea] Alarm that warns you if you are about to bump into something in the dark
  12. [Expansion on previous idea] Alarm that sounds when a person falls, aimed at saving elderly
  13. Cheap paper keyboard and use device to actually detect keystrokes
  14. [Expansion on previous idea] Play air piano and actually generate sound…
    1. a potential design for this would require a projector, maybe like a table top computer
  15. Device to automatically turn off stove (or electronic device) when you are not present for a long time
    1. default: maybe after 30 minutes, but have a button or something that would allow you to disable it if you decide to, say, simmer something for several hours
  16. [Expansion on previous idea] Device to detect if you left the garage door/car door open/unlocked (or the toilet seat up!)
  17. Use gestures as a universal remote control for your entertainment center
  18. Use gestures to smoothly move through pdf or slides
  19. Detector for raised hands used for fast polling (or alert professor that someone has a question)
    IMG_1707
  20. Use gestures to conduct a virtual orchestra that will play related music
    IMG_1709
  21. Create a robot servant that will identify different objects and bring them to you based on your command
    1. Design: involves a camera, a claw to grab object, possibly an arm to move objects out of the way and a motor as well as movement vehicle that is stable enough to pick up simple objects
  22. Scanning system to detect dirt on a surface
  23. Object finding device
    1. plays a sound to show location
    2. shows on a screen where object is in the room
    3. Design: would require a tracker to be placed on each object and involves wireless technology
  24. Digital golf swing critiquer.
  25. [Expansion on previous idea] Augmented tennis ball machine that can identify and drill your weakest strokes to improve practice.
    1. Use similar idea to help you improve your baseball skills
  26. [Expansion on previous idea] A digital personal trainer to watch your weight lifting form (and give encouragement! RAH!)
    IMG_1703
  27. [Expansion on previous idea] A tracking digital coach that can critique swimming stroke form (would have to move with swimmer).
  28. [Expansion on idea 26] Digital coach to watch you exercise,  and then aggregate actions to determine what muscles might have experienced the most strain, and recommend a battery of stretches that target it.
    1. Fancy treadmill or other exercise machine that, in addition to having a specialized routine that targets a certain group of muscles, also recommends stretches for it.
    2. Design: would need complex systems of determining movement if it is kinect based. otherwise, would need complex systems of pressure sensors in exercise equipment to determine which muscles were used and need to be stretched
  29. [Expansion on previous idea] An alternative to prescribing stretches is to use systems of pressure sensors to see if the user is doing the exercise correctly depending on where there is more or less pressure on the exercise equipment.
  30. An instrument monitor that would provide feedback on when it has warmed to room temperature (helps a great deal for tuning, especially woodwind and brass instruments), or at least a stable temperature (if playing an instrument in a cold place, for example).
  31. [Expansion on previous idea] An instrument tuner that would dynamically tune certain instruments (like a trumpet) using a tuner and robotic hand to manually move tuning slide.
  32. Prevent cat from scratching sofa (more generally, prevent a pet from doing something bad) by detecting the pet scratching the furniture based on patterns of movement and sounding an alarm or sending a text to the owner.
  33. [Expansion on previous idea] would detect if pet has a certain pattern of movement to signify that it needs/wants its owner back if they are outside the house and signals the owner of this. Similar to a baby monitor, or like a cellphone idea for pets to use
  34. Child/pet/prisoner detector that detects how “lost” target is based on distance/obstacles between target and device.
    1. Design: would have two modules. One is the base device with an alarm and a wireless receiver and the other would be the module that goes with the child/pet/prisoner with a transmitter and a circuit to calculate how far away it is from the base. If we want large distances, we can use GPS based technology, for small distance we can use signal strength from base or proximity to some preset border.
  35. Detect how long the line outside a restaurant is.
  36. Use a temperature sensor to make doors that are not double doors lock if temperature is below a certain level
    1. Idea inspired by observing students being annoyed by the equad single doors being used in the wintertime, letting in cold wind instead of taking two steps to open the double doors
    2. Design: detect temperature and wind level outside and enable automatic door locking mechanism
  37. A group tracker and interface that will keep a tour group together – maybe point in direction of the tour guide if the person gets lost or play a noise.
  38. A noise detector that plays an annoying alarm if a roommate plays music too loudly.
  39. A student tracker for detecting whether students are bored in class – note nodding heads, Facebook checking, etc. Mainly intended for use in large classes.
  40. Pseudo-GPS system that uses Wi-fi and sensors like accelerometers to provide position tracking via dead-reckoning
    1. Map new areas with sensors, and record relative positioning.  Never get lost in a new place again!
  41. Use level bar and adjustable legs to automatically level a pool table
  42. Stalker detector that tells you if it looks like you’re being followed
  43. Robot arms controller that looks at your arm motions and replicates it with a pair of robot arms
  44. IMG_1706
    1. can be used for long distance control (like surgery from a different hospital)
    2. or perhaps scale down movement so that large actions translate to small ones (for microscopic work?)
    3. or scale up actions?  Sounds like big robots now…
    4. maybe try reversing actions to help, say, put on makeup
  45. Use kinect to rotate virtual 3d objects
    IMG_1705

    1. Use similar system to reorganize/redecorate a room
  46. Device to check if a person limit is exceeded
  47. Virtual tour where your leg movement is tracked and used to move through a displayed virtual 3d space
  48. Head movement tracker that will allow for exploration of a remote environment according to your head movements and displays the image in that direction on a display.
  49. Presentation trainer that will warn you if you are fidgeting or have other bad habits
  50. Suit for providing remote tactile feedback
  51. Smart window blinds controller that raises and lowers based on detected ambient light (may need an array to be accurate and to keep unscrupulous types from seeing inside by shining a flashlight at the window).
  52. Motile scarecrow (or scaredog for those annoying Canada Geese?) that will move suddenly and/or make noise if pests get too close.
    IMG_1711
  53. Recording device for creating beats from ambient noise.
    IMG_1704
  54. Karaoke song selector (takes input from ambient noise/sound to select songs based on “mood”).
  55. Automatic gardener: water plants when necessary (if it’s been very sunny and dry, or plants look dehydrated)
  56. Virtual environment simulator that will map movements and show on screen a corresponding superpower. Such as a flick of the wrist will show your image on screen with a spiderman type web. (inspired by the movie BOLT) (this has practical use.  We promise)
  57. Virtual reality for providing virtual friends for lonely people (while pretending it’s a multiplayer game?).

 

Project Description

Choice 1:
Idea 48: Kinect-based head movement tracker that will allow for exploration of a remote environment according to your head movements and pans around to display the image in that direction on a display.

We picked this from all the possibilities in the list because it has a clear application to what we are studying: using HCI to break past the stagnant interface that is the mouse and keyboard. It involves a more natural way of computer interaction, as it allows humans to intuitively turn their heads and have an imaginary camera replicate this movement to show what one wants to see in a remote environment. This can be accomplished by panning around a panoramic image or by actually controlling a rotating video camera through the Kinect. It also involves a complex enough challenge in head tracking that it is not trivial.

Target users: Our project may be broadly aimed at anyone who wants to remotely visualize an environment. However, for the purposes of our project, we specifically identify people who wish to travel but do not have the time or funds to do so. These users seek a means of exploring a space as naturally and intuitively as possible (the stronger the sense of immersion, the better) and need a system that can bring the sights and sounds of an area to them with the least sense of artifice possible. Possible applications include Google Street View and virtual tours.

Problem description and context: Currently, exploring remote environments through applications such as Google Street View relies on the clunky interface that is the mouse and keyboard. Unfortunately, this is an unintuitive tool that often causes people to awkwardly explore an environment by clicking arrows and pressing keys. This makes true immersion impossible. The goal of our project is to create an interface that makes remote viewing of an environment much more intuitive. We aim to improve the situation by replacing the mouse and keyboard with Kinect-based head tracking used to pan around the environment. The image of the environment will then be displayed on a mobile display so as to always keep the display in view of the user. Aspects that may influence the solution include the demand for viewpoints; that is, each user will require a unique view of the environment that responds only to his/her movements. If there is high demand, an easily scaled solution may be necessary, such as virtualizing the experience into the exploration of a panoramic image. Panoramic images are easily supplied, as anyone can take one and upload it for multiple users to use concurrently. Having others upload panoramic images also solves a time and location problem, as we will not have to actually travel to the viewpoint to get the image. Our user group is characterized by little available time and money, so we will aim to keep the cost of accessing our tool low. Related solutions to this problem include the Oculus Rift; however, the Oculus Rift is at this point an unproven technology and is far from easily obtainable.

Technology platform choice: We chose the Kinect because it offers the head-tracking support we need to control the exploration of our remote environment. We also chose to display the scenery on a mobile display such as a laptop screen, iPad, or smartphone held by the user, as this allows us to change the view of the scenery while keeping the display in view of the user.

Sketches:

IMG_1700 IMG_1712


Choice 2 (non-Kinect backup):
Idea 10: Intelligent alarm clock controlled by pressure sensors on a bed, refusing to turn off until the owner gets up off the bed.

Although this is a simple idea, we think it would be very practical. Actually, we would really like to have this and wonder why no one has done it before. Our target user group is people who have trouble waking up in the morning. We’re specifically interested in users who frequently catch themselves sleeping through alarms and/or turning them off without remembering doing so. Conventional alarm clocks are not sufficient for these users because they are so easy to ignore or turn off. What they need is something more persistent, but intelligently so. An alarm that is excessively difficult to turn off will indeed force these users to get up in the desired situations, but in others (for example, if the user has been up all night, has already woken up, or is sleeping elsewhere), such a system is an annoyance. These users want an alarm that is persistent only when it needs to be; that is, one with sensitivity to context. We can always add features to enhance this device in case it is too simple (automatic prompt for an alarm when a body is detected, context-aware alarm types, or daylight tracking). A rough feasibility sketch of the core behavior appears below.

IMG_1702
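
As a quick feasibility sketch of this behavior in Arduino terms (every pin, threshold, and the fixed wake time below is an illustrative assumption; nothing here was built for this proposal):

int pressurePin = A0;     // assumed: FSR or pressure sensor under the mattress
int buzzerPin = 8;        // assumed buzzer pin
int inBedThreshold = 300; // assumed: readings above this mean someone is in bed

unsigned long wakeTime = 30000; // assumed: alarm arms 30 s after power-on
                                // (a real version would use a clock)

void setup() {
  pinMode(buzzerPin, OUTPUT);
}

void loop() {
  // once the wake time has passed, sound the alarm for as long as the
  // bed is still occupied; it goes silent only when the user gets up
  if (millis() >= wakeTime && analogRead(pressurePin) > inBedThreshold) {
    tone(buzzerPin, 880);
  } else {
    noTone(buzzerPin);
  }
}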

Arduino Says – Red Light Green Light

Group

Andrew Boik (aboik@)
Brian Huang (bwhuang@)
Kevin Lee (kevinlee@)
Saswathi Natta (snatta@)

Description

For our Part 3 we chose to implement a simple Red Light, Green Light game. We use one red and one green LED to indicate when the user is supposed to stop or allowed to go, and a flex sensor as a joystick: bending it in one direction signifies accelerating, while not bending it or bending it in the other direction signifies stopping. If the green light is on, the user is allowed to “go” by bending the flex sensor forward to accumulate points in the game. If the red light is on, the user is supposed to “stop” by bending the flex sensor backwards. If the user mistakenly “goes” while the red light is on, the buzzer plays a sad song signifying the end of the game. To play again, one need only reset the Arduino. We use a random number generator to determine when the red light or the green light will be on. The diffuser is our traffic light contraption made of tape and plastic. The project was primarily motivated by the desire to implement an interactive, joystick-controlled system. Ultimately, we think this project was a great success. We particularly like how we were able to make a responsive joystick with just a flex sensor. If we could change something, we would add components to convey the gamer’s score; in the current implementation, the gamer is completely oblivious to the number of points he has accumulated.

Note: We could have used a button as the user control for go and stop, but we chose a flex sensor to give the user the feeling of a joystick. With a flex sensor, we can also, if we choose, allow the user to go faster or slower and accumulate points at a corresponding rate instead of a simple stop and go. If you fail too many times, the police will (not) come after you with the siren from lab group 25.

Sketches of Early Ideas

Bicycle Lights

sketch2-group21

Simplified Theremin

sketch3-group21

Red Light Green Light 

Final Sketch

finalsketch

Demonstration Video

Parts used in design:

  • Arduino
  • Red LED
  • Green LED
  • Buzzer/speaker
  • Flex sensor
  • Plastic and tape for diffuser

Instructions:

  1. Set up the LEDs, buzzer, and flex sensor
    1. Connect the long lead of each LED to its chosen Arduino digital output pin and the shorter lead to ground
    2. The buzzer is also wired between an Arduino digital output pin (one that can generate PWM) and ground
    3. The flex sensor is wired as a voltage divider: one pin connects to power, and the other pin connects to an Arduino analog input pin to detect the resistance
      1. Note: if the voltage divider is not in place, the Arduino only reads full high voltage and does not register the varying resistance when the flex sensor is bent
  2. Set up the diffuser
    1. The diffuser is made of tape placed over two circular holes in a piece of plastic packaging material. The red and green LEDs are placed under the two holes and are visible through the tape. The buzzer is covered by the plastic.
  3. Test a baseline for the flex sensor and change the ‘go’ threshold as necessary (see the calibration sketch below)
    1. Once the flex sensor is connected, with the proper (~10K) voltage divider connected to power on one pin and the other pin connected to the Arduino, the serial monitor will display its reading of the input. You can bend the sensor forward and backward to see how the values change. We chose a threshold value of 360, where values above the threshold are the “go” state and values below are the “stop” state.
    2. The buzzer will sound if the red LED is on and the flex sensor is in the go state, signifying that the game is over.
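
A minimal calibration sketch along these lines might look like the following. This is an illustration of the procedure, not the group’s exact code; it assumes the flex sensor is on the same analog pin used in the game code below.

int flexSensorPin = A3; // assumed: same pin as in the game code below

void setup() {
  Serial.begin(9600);
}

void loop() {
  // bend the flex sensor forward and backward and watch the serial
  // monitor to pick a suitable go/stop threshold (we settled on 360)
  Serial.println(analogRead(flexSensorPin));
  delay(100);
}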
Setup with Diffuser

Setup without Diffuser

Code:

/*
Red Light/Green Light Game!
*/

#include "pitches.h"

int flexSensorPin = A3;
int redLED = 9;
int greenLED = 11;
int state; // holder for red/green light state
int pause = 250; // constant for use in error state
int leeway = 400; // time for player to switch
int interval = 5; // ticks before randomly generating a light
int counter = 0;
int goThreshold = 360; // threshold for stop/go.
                       // Values above are “go”
int score; // currently inconsequential,
           // but could be used to create top scorers

// notes in the melody:
int melody[] = {NOTE_C5, NOTE_B4, NOTE_AS4, NOTE_A4};

// note durations: 4 = quarter note, 8 = eighth note, etc.:
int noteDurations[] = {4, 4, 4, 1};

void setup(){
  Serial.begin(9600);
  pinMode(redLED, OUTPUT);
  pinMode(greenLED, OUTPUT);

  digitalWrite(redLED, HIGH); // initialize to red light
  digitalWrite(greenLED, LOW);
  state = 0;
  score = 0; // set score to zero
}

void loop(){
  if (counter >= interval) {
    state = random(2);
    light(state);
    counter = 0;

    delay(leeway); // give the player time to react
    leeway = leeway - 1; // make the game iteratively harder
  }

  int flexSensorReading = analogRead(flexSensorPin);
  switch (state) {
    case 0: // red light is on
      if (flexSensorReading <= goThreshold) { 
        // success! 
        score++;
      } 
      else { 
        failure();
      } 
      break;
    case 1:
      // green light is on 
      if (flexSensorReading > goThreshold) {
        // Good! Get points!
        score++;
      }
      else {
        // we don’t punish for not going, you just get no points
      }
      break;
    default:
       // we should never get here
       error();
  }

  Serial.print("state: ");
  Serial.println(state);
  Serial.println(flexSensorReading); // raw flex reading, for debugging

  counter++;
  delay(pause);
}

void light(int status){
  switch (status) {
    case 0: // red light!
      digitalWrite(redLED, HIGH);
      digitalWrite(greenLED, LOW);
      break;
    case 1: // green light!
      digitalWrite(redLED, LOW);
      digitalWrite(greenLED, HIGH);
      break;
    default:
      //this should never happen
      error();
  }
}

void error() {
  while (true) {
    digitalWrite(redLED, LOW);
    digitalWrite(greenLED, LOW);
    delay(pause);
    digitalWrite(redLED, HIGH);
    digitalWrite(greenLED, HIGH);
    delay(pause);
  }
}

void failure() {
  // iterate over the notes of the "fail" melody:
  for (int thisNote = 0; thisNote < 4; thisNote++) {
    // to calculate the note duration, take one second
    // divided by the note type.
    // e.g. quarter note = 1000 / 4, eighth note = 1000/8, etc.
    int noteDuration = 1000/noteDurations[thisNote];
    tone(8, melody[thisNote],noteDuration);

    // to distinguish the notes, set a minimum time between them.
    // the note's duration + 30% seems to work well:
    int pauseBetweenNotes = noteDuration * 1.30;
    delay(pauseBetweenNotes);
    // stop the tone playing:
    noTone(8);
  }
  error();
}