diff --git a/Lab 1/Feedback/Feedback.pdf b/Lab 1/Feedback/Feedback.pdf new file mode 100644 index 0000000000..dbf02e79ee Binary files /dev/null and b/Lab 1/Feedback/Feedback.pdf differ diff --git a/Lab 1/Feedback/text b/Lab 1/Feedback/text new file mode 100644 index 0000000000..b19b4534c8 --- /dev/null +++ b/Lab 1/Feedback/text @@ -0,0 +1,11 @@ +I presented my seven storyboard ideas to my breakout room group. I explained the plan (setting, players, activity, and goals)for each storyboard and showed my 4–frame sketches. + +**Feedback I received:** +They liked that the interactions were very clear and easy to understand - like a storybook. +They suggested keeping the light colour consistent across frames to avoid confusion (e.g., yellow = warming, green = good, red = bad). +They mentioned that for the sunrise alarm, the gradual brightness idea was intuitive and could work well in real life e.g. at hotels or luxury apartments. + +**What I improved:** +I made sure the light colours in all storyboards were consistent and clearly indicated the right status. +I simplified my sketches to make them easier to follow (fewer background details). +I made the captions shorter and more direct so they fit neatly under each frame. diff --git a/Lab 1/Plan/Page 1.jpg b/Lab 1/Plan/Page 1.jpg new file mode 100644 index 0000000000..1c117c251b Binary files /dev/null and b/Lab 1/Plan/Page 1.jpg differ diff --git a/Lab 1/Plan/Page 2.jpg b/Lab 1/Plan/Page 2.jpg new file mode 100644 index 0000000000..d1b1ca509c Binary files /dev/null and b/Lab 1/Plan/Page 2.jpg differ diff --git a/Lab 1/Plan/Setting, players, activity and goals.pdf b/Lab 1/Plan/Setting, players, activity and goals.pdf new file mode 100644 index 0000000000..c52cea4452 Binary files /dev/null and b/Lab 1/Plan/Setting, players, activity and goals.pdf differ diff --git a/Lab 1/Plan/text b/Lab 1/Plan/text new file mode 100644 index 0000000000..676a3cc6e4 --- /dev/null +++ b/Lab 1/Plan/text @@ -0,0 +1,62 @@ +Storyboard 1 – Charging Status Indicator +Setting: A bedroom nightstand or desk, at night or during the day when the phone needs charging. +Players: The phone owner. +Activity: The user places their phone on the wireless charger and glances at the light to check charging progress. +Goals: +User: To know when the phone is fully charged without unlocking it. +Device: To communicate charging status clearly using light colour. + + +Storyboard 2 – Smart Entryway Weather Indicator +Setting: House entryway. +Players: The person living in the house (and potentially family/housemates). +Activity: The user checks the light above the door before heading outside. +Goals: +User: To know if they need an umbrella, coat, or other gear before going out. +Device: To indicate good or bad weather quickly. + + +Storyboard 3 – Remote Locator +Setting: Living room. +Players: The person watching TV. +Activity: The user notices the remote is missing, uses the app to trigger the light on the remote, and retrieves it. +Goals: +User: To find the remote quickly and avoid wasting time searching. +Device: To make the remote’s location obvious through a glowing light. + + +Storyboard 4 – Smart Mailbox +Setting: Outside the house or at the apartment mailbox area, any time mail is expected. +Players: Resident, mail carrier. +Activity: The resident looks at the mailbox light before walking to it. +Goals: +User: To avoid checking an empty mailbox unnecessarily or to check at night. +Device: To signal whether mail has been delivered (green = empty, red = full). 
+ + +Storyboard 5 – Smart Pot +Setting: Kitchen stove area. +Players: Home cook, pot, stove. +Activity: The user puts water to boil, and the pot detects temperature rise, turning on a light when boiling point is reached. +Goals: +User: To know when water reaches boiling without constantly watching. +Device: To visually alert user with light when boiling is complete. + + +Storyboard 6 – Sunrise Alarm Light +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise. +Goals: +User: To wake up more naturally and gently. +Device: To gradually prepare user’s body for waking by increasing light brightness. + + +Storyboard 7 – Fish Tank Monitor +Setting: Living room or wherever the fish bowl is placed, throughout the day. +Players: Fish, fish owner. +Activity: Light on the lid indicates water quality. Owner refills or cleans water when needed. +Goals: +User: To maintain healthy water for the fish and know when it needs changing. +Device: To alert user to water condition through light colour changes. + diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic new file mode 100644 index 0000000000..63859d97c3 Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg new file mode 100644 index 0000000000..8f08ce69cf Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic new file mode 100644 index 0000000000..380fef4aae Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg new file mode 100644 index 0000000000..3e545f08de Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC b/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC new file mode 100644 index 0000000000..4f3a679532 Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg b/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg new file mode 100644 index 0000000000..714149bb9b Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic new file mode 100644 index 0000000000..50593a81ac Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg new file mode 100644 index 0000000000..ee9f55afbd Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg differ diff --git a/Lab 1/Prototypes/text b/Lab 1/Prototypes/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Prototypes/text @@ -0,0 +1 @@ + diff --git a/Lab 1/README.md b/Lab 1/README.md index 4686781725..612ea14dde 100644 --- a/Lab 1/README.md +++ b/Lab 1/README.md @@ -1,8 
+1,8 @@ - + # Staging Interaction -\*\***NAME OF COLLABORATOR HERE**\*\* +\*\***N/A**\*\* In the original stage production of Peter Pan, Tinker Bell was represented by a darting light created by a small handheld mirror off-stage, reflecting a little circle of light from a powerful lamp. Tinkerbell communicates her presence through this light to the other characters. See more info [here](https://en.wikipedia.org/wiki/Tinker_Bell). @@ -73,23 +73,105 @@ _Goals:_ What are the goals of each player? (e.g., jumping to a tree, opening th The interactive device can be anything *except* a computer, a tablet computer or a smart phone, but the main way it interacts needs to be using light. \*\***Describe your setting, players, activity and goals here.**\*\* + + + + +**Storyboard 1 – Charging Status Indicator** +Setting: A bedroom nightstand or desk, at night or during the day when the phone needs charging. +Players: The phone owner. +Activity: The user places their phone on the wireless charger and glances at the light to check charging progress. +Goals: + User: To know when the phone is fully charged without unlocking it. + Device: To communicate charging status clearly using light colour. + +**Storyboard 2 – Smart Entryway Weather Indicator** +Setting: House entryway. +Players: The person living in the house (and potentially family/housemates). +Activity: The user checks the light above the door before heading outside. +Goals: + User: To know if they need an umbrella, coat, or other gear before going out. + Device: To indicate good or bad weather quickly. + +**Storyboard 3 – Remote Locator** +Setting: Living room. +Players: The person watching TV. +Activity: The user notices the remote is missing, uses the app to trigger the light on the remote, and retrieves it. +Goals: + User: To find the remote quickly and avoid wasting time searching. + Device: To make the remote’s location obvious through a glowing light. + +**Storyboard 4 – Smart Mailbox** +Setting: Outside the house or at the apartment mailbox area, any time mail is expected. +Players: Resident, mail carrier. +Activity: The resident looks at the mailbox light before walking to it. +Goals: + User: To avoid checking an empty mailbox unnecessarily or to check at night. + Device: To signal whether mail has been delivered (green = empty, red = full). + +**Storyboard 5 – Smart Pot** +Setting: Kitchen stove area. +Players: Home cook, pot, stove. +Activity: The user puts water to boil, and the pot detects temperature rise, turning on a light when boiling point is reached. +Goals: + User: To know when water reaches boiling without constantly watching. + Device: To visually alert user with light when boiling is complete. + +**Storyboard 6 – Sunrise Alarm Light** +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise. +Goals: + User: To wake up more naturally and gently. + Device: To gradually prepare user’s body for waking by increasing light brightness. + +**Storyboard 7 – Fish Tank Monitor** +Setting: Living room or wherever the fish bowl is placed +Players: Fish, fish owner. +Activity: Light on the lid indicates water quality. Owner refills or cleans water when needed. +Goals: + User: To maintain healthy water for the fish and know when it needs changing. + Device: To alert user to water condition through light colour changes. + + Storyboards are a tool for visually exploring a users interaction with a device. 
They are a fast and cheap method to understand user flow, and iterate on a design before attempting to build on it. Take some time to read through this explanation of [storyboarding in UX design](https://www.smashingmagazine.com/2017/10/storyboarding-ux-design/). Sketch seven storyboards of the interactions you are planning. **It does not need to be perfect**, but must get across the behavior of the interactive device and the other characters in the scene. \*\***Include pictures of your storyboards here**\*\* + + + + + + + + Present your ideas to the other people in your breakout room (or in small groups). You can just get feedback from one another or you can work together on the other parts of the lab. \*\***Summarize feedback you got here.**\*\* +I presented my seven storyboard ideas to a small group. I explained the plan (setting, players, activity, and goals)for each storyboard and showed my 4–frame sketches. + +**Feedback I received:** +They liked that the interactions were very clear and easy to understand - like a storybook. +They suggested keeping the light colour consistent across frames to avoid confusion (yellow = warming/ neutral, green = good, red = bad). +They mentioned that for the sunrise alarm, the gradual brightness idea was intuitive and could work well in real life e.g. at hotels or luxury apartments. + +**What I improved:** +I made sure the light colours in all storyboards were consistent and clearly indicated the right status. +I simplified my sketches to make them easier to follow (fewer background details). +I made the captions shorter and more direct so they fit neatly under each frame. ## Part B. Act out the Interaction Try physically acting out the interaction you planned. For now, you can just pretend the device is doing the things you’ve scripted for it. -\*\***Are there things that seemed better on paper than acted out?**\*\* +\*\***Are there things that seemed better on paper than acted out?**\*\* +Yes, some transitions (like charging light going from red to green) needed to be slower and smoother to feel natural. On paper/storyboards it seemed fine to just “switch” colours due to the 'scene' changing, but a gradual transition is more intuitive in reality. -\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +Yes, I thought it would be helpful if the brightness or flash rate could indicate urgency (e.g. flashing faster/higher frequency when the user is very close to the lost remote). ## Part C. Prototype the device @@ -102,19 +184,39 @@ We invented this tool for this lab! If you run into technical issues with this tool, you can also use a light switch, dimmer, etc. that you can can manually or remotely control. -\*\***Give us feedback on Tinkerbelle.**\*\* +\*\***Give us feedback on Tinkerbelle.**\*\* +Tinkerbelle was successful in supporting colour changes during prototyping. An additional feature to allow users to input hex codes or directly manipulate RGB values would improve the precision when selecting specific colours and transitions. A feature to save frequently used colours as presets could also make switching between states more efficient during testing. - -## Part D. Wizard the device +## Part D. Wizard the device Take a little time to set up the wizarding set-up that allows for someone to remotely control the device while someone acts with it. 
Hint: You can use Zoom to record videos, and you can pin someone’s video feed if that is the scene which you want to record. \*\***Include your first attempts at recording the set-up video here.**\*\* -Now, hange the goal within the same setting, and update the interaction with the paper prototype. + + +Now, change the goal within the same setting, and update the interaction with the paper prototype. \*\***Show the follow-up work here.**\*\* +https://github.com/user-attachments/assets/57b08363-a2df-45cc-8f6b-99578a857abc + + +**Charging Status Indicator: Goal change → ** +Instead of checking whether the phone is fully charged, the user now needs to know if the device is charging at all (e.g. cable is loose, battery is faulty). Yellow previously indicated the device was charging and green indicated that charging was complete. Now, red indicates that no charge is taking place. + + + + + + + +https://github.com/user-attachments/assets/8f74b6bc-adff-4e45-b62c-a2bde9957c3b + + + + + ## Part E. Costume the device Only now should you start worrying about what the device should look like. Develop three costumes so that you can use your phone as this device. @@ -122,17 +224,70 @@ Only now should you start worrying about what the device should look like. Devel Think about the setting of the device: is the environment a place where the device could overheat? Is water a danger? Does it need to have bright colors in an emergency setting? \*\***Include sketches of what your devices might look like here.**\*\* +**Idea/ Storyboard 1: Charging Status Indicator** + + + +The Phone and its Charger: + + + +**Idea / Storyboard 3: Remote Locator** +The Remote: + + +**Idea / Storyboard 6: Sunrise Light** +The Sunrise Light: + \*\***What concerns or opportunitities are influencing the way you've designed the device to look?**\*\* +**Charging Status Indicator (Storyboard/Idea 1)** +The charger was sketched to resemble popular upright phone chargers, such as MagSafe or Anker stands, with a visible status light at the front. This familiar design makes it intuitive to use and easy to integrate into existing routines. +The phone costume was created by cutting a rectangle from paper to represent the phone and a battery-shaped cut-out in its centre to show charging status. A charger costume was built from cardboard as an open-roof box that the phone can sit inside. The front of the box has a circular cut-out where the Tinkerbelle phone’s light can shine through, displaying yellow for “charging” and green for “fully charged.” This design choise emphasises visibility of status and allows quick recognition of the current charging state. + +**Remote Locator (Storyboard/ Idea 3)** +The remote was kept close to a natural, familiar remote shape, since users are already accustomed to rectangular remotes. For the costume, the remote was first drawn on paper, then reinforced with cardboard so that it would feel more solid and resist bending over when placed in couch cushions. A circular cut-out was added to indicate where the light would appear. The decision to keep the shape and layout simple ensures that users immediately recognise it as a remote, while the white paper surface improves visibility. + +**Sunrise Light (Storyboard/ Idea 6)** +Two designs were sketched: one resembling a hanging bulb to act as a ceiling-mounted light, and another shaped like a cone that could sit on a bedside table. 
The cone design was chosen for the prototype because it can take on different colours and patterns (similar to star or galaxy projectors ) which creates an opportunity to make waking up a more enjoyable experience. The costume was made by folding an A4 sheet of paper into a cone to resemble a lamp shade. This setup diffuses light evenly, reduces glare, and clearly communicates its function as a sunrise or ambient light. + ## Part F. Record \*\***Take a video of your prototyped interaction.**\*\* +**Charging Status Indicator (Storyboard/Idea 1)** + + +https://github.com/user-attachments/assets/00925fbf-f1ee-49ee-a3be-b61e032543b3 + + + +**Remote Locator (Storyboard/ Idea 3)** + + + +https://github.com/user-attachments/assets/c1ff88bb-b948-4bd6-9daf-79846a9a05e3 + + + + +**Sunrise Light (Storyboard/ Idea 6)** + + + + +https://github.com/user-attachments/assets/540962fb-3488-4f18-ab74-328034f6c090 + + + + + \*\***Please indicate who you collaborated with on this Lab.**\*\* Be generous in acknowledging their contributions! And also recognizing any other influences (e.g. from YouTube, Github, Twitter) that informed your design. +All other group members dropped the course. # Staging Interaction, Part 2 @@ -144,7 +299,10 @@ This describes the second week's work for this lab activity. You will be assigned three partners from other groups. Go to their github pages, view their videos, and provide them with reactions, suggestions & feedback: explain to them what you saw happening in their video. Guess the scene and the goals of the character. Ask them about anything that wasn’t clear. -\*\***Summarize feedback from your partners here.**\*\* +\*\***Summarize feedback from your partners here.**\*\* +They found the captions on both the storyboards and videos very helpful for understanding the intended interactions. They thought the remote locator idea was particularly interesting and suggested it could be extended to other objects such as car keys, house keys, or wallets. They recommended adding vibration or sound feedback, as well as increasing the frequency of the light flashes, haptic pulses, or sound as the user gets closer to the object. + +The sunrise light idea received the most positive feedback. Students from other groups commented that both versions were great and that this approach could provide a gentler, more effective way to wake not just adults, but also children and pets. A suggested extension was to add a “disco mode” or allow the light to double as a projector for playful or decorative use when waking. ## Make it your own @@ -154,3 +312,64 @@ Do last week’s assignment again, but this time: 3) We will be grading with an emphasis on creativity. \*\***Document everything here. (Particularly, we would like to see the storyboard and video, although photos of the prototype are also great.)**\*\* + +## Part A. Plan +\*\***Describe your setting, players, activity and goals here.**\*\* +**Storyboard Plan – Sunrise Alarm** +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise, while a soft chime plays to complement the visual cue. +Goals: + User: To wake up more naturally and gently. + Device: To gradually prepare the user’s body for waking by increasing light brightness and softly signalling with sound. 
+ + +\*\***Include pictures of your storyboards here**\*\* + + + + +\*\***Summarize feedback you got here.**\*\* +- Most people felt it would be gentle way to wake up, potentially useful even for children or pets. +- One suggestion was to add extra modes such as a “disco” feature or to have it double as a projector for decorative patterns. + + +## Part B. Act out the Interaction + +Try physically acting out the interaction you planned. For now, you can just pretend the device is doing the things you’ve scripted for it. + +\*\***Are there things that seemed better on paper than acted out?**\*\* +In addition to the gradual change in light brightness from dim to warm white, the accompanying music or chime should also transition smoothly. While the storyboard could show a simple “on/off” cue, implementing gradual changes in both light and sound creates a more natural and gentle wake-up experience. + +\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +The combination of visual and auditory cues was identified as a way to make the interaction more engaging and effective, particularly for users who may be heavy sleepers or for waking children & pets gently. A vibration feature could also be beneficial for the table version. + +## Part C. Prototype the device +See Part F. + +## Part D. Wizard the device +See Part F. + +## Part E. Costume the device +\*\***Include sketches of what your devices might look like here.**\*\* + + + + + +\*\***What concerns or opportunitities are influencing the way you've designed the device to look?**\*\* +Two designs were initially sketched: one resembling a hanging ceiling bulb, and another shaped like a cone that could sit on a bedside table. The hanging bulb design was chosen for the prototype, since the table-top cone was used in Lab 1a. It was constructed by laying strips of paper in a crisscross pattern — one vertical, one horizontal, and two along the diagonals — and then curving them upward to form a spherical shape. A thin strip of paper was attached to the top to resemble a hanging fixture, similar to a ceiling light. This design diffuses light evenly, reduces glare, and clearly communicates its function as a sunrise or ambient light (similar to the initial sketch), while also creating an opportunity to display soft patterns or colours for a pleasant wake-up experience. + + +## Part F. Record + +\*\***Take a video of your prototyped interaction.**\*\* + + +https://github.com/user-attachments/assets/9a37bf3d-536c-48ca-8d33-0971aa1814a7 + + + + +\*\***Please indicate who you collaborated with on this Lab.**\*\* +All other group members have dropped the course. 
diff --git a/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 b/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 new file mode 100644 index 0000000000..3aea958138 Binary files /dev/null and b/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 differ diff --git a/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 b/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 new file mode 100644 index 0000000000..2bd7ac3e25 Binary files /dev/null and b/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 differ diff --git a/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 b/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 new file mode 100644 index 0000000000..ba21a0d120 Binary files /dev/null and b/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 differ diff --git a/Lab 1/Recordings/text b/Lab 1/Recordings/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Recordings/text @@ -0,0 +1 @@ + diff --git a/Lab 1/Sketches/Sketches.jpg b/Lab 1/Sketches/Sketches.jpg new file mode 100644 index 0000000000..21aafacd56 Binary files /dev/null and b/Lab 1/Sketches/Sketches.jpg differ diff --git a/Lab 1/Sketches/text b/Lab 1/Sketches/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Sketches/text @@ -0,0 +1 @@ + diff --git a/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg b/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg new file mode 100644 index 0000000000..e24aa3cae8 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg b/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg new file mode 100644 index 0000000000..d2f19fa2cd Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg b/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg new file mode 100644 index 0000000000..852db94de4 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg b/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg new file mode 100644 index 0000000000..f330f4ff7d Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg b/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg new file mode 100644 index 0000000000..236f0e68d3 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg b/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg new file mode 100644 index 0000000000..b1cdcf2cab Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg b/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg new file mode 100644 index 0000000000..74ec07293d Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg differ diff --git a/Lab 1/Storyboards/text b/Lab 1/Storyboards/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Storyboards/text @@ -0,0 +1 @@ + diff --git a/Lab 2/Extending the Pi.md b/Lab 2/Extending the Pi.md new file mode 100644 index 0000000000..d8f9242a7d --- /dev/null +++ b/Lab 2/Extending the Pi.md @@ -0,0 +1,123 @@ +# Extending the Pi + +To extend the Pi, we are using breakout boards that connect to the PI using a 
standard communication bus [I2C](https://learn.sparkfun.com/tutorials/i2c/all). [StemmaQT](https://learn.adafruit.com/introducing-adafruit-stemma-qt/what-is-stemma) and [Qwiic](https://www.sparkfun.com/qwiic#overview) use a standardized 4-pin connector to connect devices using the I2C protocol. + +The StemmaQT and I2C parts often have a fixed I2C address; to differentiate between similar parts, the devices often have pads that allow additional bits to be pulled high or low. The addresses are in [hexadecimal](https://learn.sparkfun.com/tutorials/hexadecimal/introduction) format, things like `0x6f`. This is the hexadecimal (or hex) representation for the decimal number `111`, which is represented as `1101111` in binary. You are not expected to make any kinds of conversions but should have some conceptual grasp that a hex value is just a number shown another way. [This Python tutorial](https://towardsdatascience.com/binary-hex-and-octal-in-python-20222488cee1) will assist you if you need help manipulating hexadecimal numbers. + +## Connecting a Button + +The buttons you've used on the screen are quite simple. Aside from [debouncing](https://learn.adafruit.com/make-it-switch/debouncing), when you press down you are closing a circuit, allowing electricity to flow to the pins wired to the two buttons, in this case [GPIO 23](https://pinout.xyz/pinout/pin16_gpio23) and [24](https://pinout.xyz/pinout/pin18_gpio24). That's a perfectly reasonable way to connect a button. I2C is not typically used for buttons, but here we demonstrate one way you might see it. This also allows additional functionality to be built right into the button, such as the ability to remember the last time it was pressed. + +### Hardware + +From your kit, take out the [mini-PiTFT](https://learn.adafruit.com/adafruit-mini-pitft-135x240-color-tft-add-on-for-raspberry-pi), a [stemmaQT cable](https://www.adafruit.com/product/4210) and the [Qwiic Button](https://www.sparkfun.com/products/16842).
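+
+Before wiring up the I2C button, you can sanity-check the two GPIO-wired buttons described above (GPIO 23 and 24 on the mini-PiTFT) with a few lines of `digitalio` code. This is a minimal sketch using the same calls as `screen_boot_script.py`; the assumption that an unpressed button reads `True` comes from the board's pull-ups, so check it against your own hardware.
+
+```python
+import time
+import board
+import digitalio
+
+# The mini-PiTFT's two push buttons are wired directly to GPIO 23 and 24.
+button_a = digitalio.DigitalInOut(board.D23)
+button_b = digitalio.DigitalInOut(board.D24)
+button_a.switch_to_input()
+button_b.switch_to_input()
+
+while True:
+    # Assumption: with the on-board pull-ups, an unpressed button reads True,
+    # so a press shows up as False.
+    if not button_a.value:
+        print("Button A pressed")
+    if not button_b.value:
+        print("Button B pressed")
+    time.sleep(0.1)
+```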
+
+
+
+
+
+
+
+
+## Connecting a Sensor
+
+Your kit is full of sensors! Look up what they can do and feel free to ask your TAs; we love to talk sensors. We will go into sensors in more depth in the coming weeks, but we put this small sample here to demonstrate how you can get sensor data if you want to use it for your project this week.
+
+We are going to connect the [Adafruit APDS9960 Proximity, Light, RGB, and Gesture Sensor](https://www.adafruit.com/product/3595). You can leave the button plugged in and daisy-chain the sensor; this is part of the magic of I2C.
+
+
+
+
+Now run `python proximity.py`. What did you see? Check out [here](https://learn.adafruit.com/adafruit-apds9960-breakout/circuitpython) to learn more about the sensor and think about how you might be able to apply it in future projects!
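+
+Building on `proximity.py` (included in this lab folder), here is a minimal sketch that turns the raw reading into a rough "something is near" trigger. The threshold of 50 is an arbitrary value for illustration; tune it by watching the numbers your own sensor prints.
+
+```python
+import time
+import board
+import busio
+import adafruit_apds9960.apds9960
+
+i2c = busio.I2C(board.SCL, board.SDA)
+sensor = adafruit_apds9960.apds9960.APDS9960(i2c)
+sensor.enable_proximity = True
+
+NEAR_THRESHOLD = 50  # arbitrary cut-off, chosen for illustration
+
+while True:
+    reading = sensor.proximity  # larger readings generally mean a closer object
+    if reading > NEAR_THRESHOLD:
+        print("Something is close:", reading)
+    time.sleep(0.2)
+```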
diff --git a/Lab 2/I2C_scan.py b/Lab 2/I2C_scan.py
new file mode 100644
index 0000000000..b6bf377e1f
--- /dev/null
+++ b/Lab 2/I2C_scan.py
@@ -0,0 +1,14 @@
+import board
+import busio
+
+# Try to create an I2C device
+i2c = busio.I2C(board.SCL, board.SDA)
+print("I2C ok!")
+# ids = '\n'.join(map(str,i2c.scan()))
+# print(f"I2C device ID's found:\n{ids}")
+
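+# Reserve the I2C bus before scanning; other code can use it again after unlock().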
+while not i2c.try_lock():
+    pass
+
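+# Addresses print in hex, e.g. 0x6f is decimal 111 (binary 1101111).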
+print("I2C addresses found:", [hex(device_address) for device_address in i2c.scan()])
+i2c.unlock()
\ No newline at end of file
diff --git a/Lab 2/Other ways to connect IxE to your computer.md b/Lab 2/Other ways to connect IxE to your computer.md
new file mode 100644
index 0000000000..9c074b2663
--- /dev/null
+++ b/Lab 2/Other ways to connect IxE to your computer.md
@@ -0,0 +1,299 @@
+
+## Connect IxE to your computer via the computer Ethernet port
+
+(based off of instructions from [Nikmart's IxE Git](https://github.com/nikmart/interaction-engine/wiki/Connect-IxE-to-your-computer-via-Ethernet-port))
+
+## Connecting to The HOUSE Wifi
+
+1. Register the MAC address of your Raspberry Pi on The House network at https://selfcare.boingohotspot.net/login using Add a Device.
+1. Edit the `/etc/wpa_supplicant/wpa_supplicant.conf` file with `nano`, OR use the `boot` volume that you see when the SD card is plugged into your computer: it contains a file called `wpa_supplicant.conf.bak`. Duplicate that file and rename the duplicate to `wpa_supplicant.conf`. Now edit the duplicated file (`wpa_supplicant.conf`) and add The House wifi to the list of networks to connect to as shown below. Then safely eject the SD card, plug it back into the Pi, and power it back up.
+1. The section you need to add is
+```shell
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="The House"
+ key_mgmt=NONE
+}
+```
+
+
+Afterward, your file should look something like the following.
+
+```shell
+update_config=1
+country=US
+
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="The House"
+ key_mgmt=NONE
+}
+
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="DeviceFarm"
+ psk="device@theFarm"
+ key_mgmt=WPA-PSK
+}
+
+```
+3. Try logging into your device using ssh from a terminal.
+4. If you need to see which network your IxE is on, use `iwconfig` or find it in this list [here](https://interactivedevice18.slack.com/files/U90LA9TLH/F92HXB020/ixe_ip_mac_hostname.xlsx):
+
+```shell
+pi@ixe42:~ $ iwconfig wlan0
+wlan0 IEEE 802.11 ESSID:"The House"
+ Mode:Managed Frequency:2.462 GHz Access Point: 24:79:2A:21:58:C8
+ Bit Rate=72.2 Mb/s Tx-Power=31 dBm
+ Retry short limit:7 RTS thr:off Fragment thr:off
+ Power Management:on
+ Link Quality=67/70 Signal level=-43 dBm
+ Rx invalid nwid:0 Rx invalid crypt:0 Rx invalid frag:0
+ Tx excessive retries:0 Invalid misc:0 Missed beacon:0
+```
+
+
+
+
+
+
+
+
+
+### Instructions for Mac
+
+1. Plug an ethernet cable from your Mac to the Raspberry Pi (note you may need to use a Thunderbolt to Ethernet or USB to Ethernet adapter if your Mac does not have a built-in Ethernet port).
+
+2. Check that the IxE is getting a self-assigned IP in System Preferences -> Network. It should have an orange color.
+
+3. To get Internet on your Pi, use Internet Sharing and share your Wifi with your Ethernet. (Note: This will not work on 802.1X networks like eduroam. If you are trying to do this on campus, connect to Cornell Visitor and then share your wifi.)
+
+4. Try pinging your IxE with the .local extension: `ping ixe05.local`
+
+If the ping works, you can ssh in just like normal.
+
+### Instructions for PC
+
+[someone with a pc, please update this...]
+
+## Connect IxE to your computer via a separate WiFI card
+
+You can share a WiFi connection to the wider internet from your laptop if you can bring up a separate Wifi interface on your computer (for example by using a USB Wifi adapter).
+
+### Instructions for Mac
+
+1. Bring up the new WiFi interface. This will likely involve installing the drivers for the device, registering the new interface (for example, by using http://mycomputers.cit.cornell.edu at Cornell), and getting it online.
+
+1. Go to the Sharing control panel to enable Internet sharing from your newly installed interface to the WiFi network which you will share locally. Go to WiFi Options to configure your network to be named DeviceFarm, and the WPA2 password to be the DeviceFarm password. Finally, check Internet Sharing to turn the sharing on.
+
+1. Power up your IxE. It should come up on your local network, and you should be able to access it via ssh like you would on the class network.
+
+[someone with a pc, please update this...]
+
+## Connect your IxE to your own WiFi
+
+Based on instructions found here: [https://howchoo.com/g/ndy1zte2yjn/how-to-set-up-wifi-on-your-raspberry-pi-without-ethernet](https://howchoo.com/g/ndy1zte2yjn/how-to-set-up-wifi-on-your-raspberry-pi-without-ethernet)
+
+If you have a WiFi router at home that you control, you can connect to it by setting the wifi configuration of your Pi. To do this:
+
+1. Use a text editor on your computer to create a file called `wpa_supplicant.conf` with the following text in it:
+
+```shell
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="DeviceFarm"
+ psk="device@theFarm"
+ key_mgmt=WPA-PSK
+}
+
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="YOUR WIFI NAME HERE"
+ psk="YOUR WIFI PASSWORD HERE"
+ key_mgmt=WPA-PSK
+}
+```
+2. Plug the SD card with the IxE image on it into your computer.
+You should see a disk drive called `boot` mount to your computer.
+
+3. Open `boot` and copy the `wpa_supplicant.conf` file into the directory.
+
+4. Safely eject the SD card from your computer.
+
+5. Plug the SD card back into your IxE, then plug it into USB power.
+
+When the Pi boots up, it will copy the `wpa_supplicant.conf` file into the WiFi settings directory in `/etc/wpa_supplicant/`. This will update your WiFi settings and should get the Pi on your home wifi.
+
+
+
+## Connecting to RedRover
+You can get your Pi working on Cornell's `RedRover` network by:
+
+### Registering your Pi's MAC address to your Cornell account at: [https://dnsdb.cit.cornell.edu/dnsdb-cgi/mycomputers.cgi](https://dnsdb.cit.cornell.edu/dnsdb-cgi/mycomputers.cgi)
+
+You can find your MAC address using the spreadsheet (IXE_IP_MAC_HOSTNAME) we provided on the class Slack. The MAC address is associated with your ixe hostname, in the form ixe[00] where [00] are your numbers.
+
+Register your MAC address as one of your devices. We recommend you name it ixe[00] so you know which registration this is for.
+
+### Adding a python script to your machine to email the ixe's IP to you
+
+1. While you are logged into your Pi (from DeviceFarm, The House, or through ethernet), create a new file for the `python` script that will email the IP to you
+
+```shell
+nano startup_mailer.py
+```
+
+2. Copy and paste this python code into the editor
+
+```python
+import subprocess
+import smtplib
+import socket
+from email.mime.text import MIMEText
+import datetime
+
+# Change to your own account information
+to = 'YOUREMAIL@DOMAIN.com'
+gmail_user = 'interactiveDeviceDesign@gmail.com'
+gmail_password = 'device@theFarm'
+smtpserver = smtplib.SMTP('smtp.gmail.com', 587)
+smtpserver.ehlo()
+smtpserver.starttls()
+smtpserver.ehlo()
+smtpserver.login(gmail_user, gmail_password)
+today = datetime.date.today()
+
+# Very Linux Specific
+arg='ip route list'
+p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE,universal_newlines=True)
+data = p.communicate()
+split_data = data[0].split()
+ipaddr = split_data[split_data.index('src')+1]
+my_ip = 'ixe[00] ip is %s' % ipaddr
+msg = MIMEText(my_ip)
+msg['Subject'] = 'IP for ixe58 on %s' % today.strftime('%b %d %Y')
+msg['From'] = gmail_user
+msg['To'] = to
+smtpserver.sendmail(gmail_user, [to], msg.as_string())
+smtpserver.quit()
+```
+
+This script is set up with our class GMail account, `interactiveDeviceDesign@gmail.com`. We recommend you use this so that you do not need to store your own GMail password in clear text.
+
+3. Look for the line `to = 'YOUREMAIL@DOMAIN.com'` and replace the email address with your email. Any email like your GMail or Cornell Email should work fine.
+
+4. Put your ixe's number in the lines `my_ip = 'ixe[00] ip is %s' % ipaddr` and `msg['Subject'] = 'IP for ixe58 on %s' % today.strftime('%b %d %Y')`, replacing the `[00]` and the `58` with your number.
+
+4. Save the file and exit `nano` (using Ctrl+X, then choosing `yes`, then saving to `startup_mailer.py`).
+
+5. Test the python code by running `python /home/pi/startup_mailer.py`. You should get an email with your IP address in about a minute.
+
+The email should look like this:
+
+```text
+From: interactivedevicedesign@gmail.com
+To: YOUREMAIL@DOMAIN.com
+
+ixe[00] ip is xxx.xxx.xxx.xxx <-- this will be your ixe number and the IP it has currently
+```
+
+**NOTE: A RedRover IP will be on 10.xxx.xxx.xxx. If you get something like 192.xxx.xxx.xxx then you are probably connected to `DeviceFarm`**
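+
+If parsing the `ip route` output ever gives you trouble, another common way to find the Pi's current IP from Python is the UDP socket trick shown below. This is just a sketch for reference, not part of the course script; no packets are actually sent.
+
+```python
+import socket
+
+# "Connecting" a UDP socket to a public address makes the OS pick the outgoing
+# interface and local address; nothing is actually transmitted.
+s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+s.connect(("8.8.8.8", 80))
+print("My IP is", s.getsockname()[0])
+s.close()
+```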
+
+6. Tell your Pi to run the `startup_mailer.py` code when your pi reboots using `cron` (a [cool Unix tool](https://en.wikipedia.org/wiki/Cron) that allows you to automate things on your machine)
+
+```shell
+crontab -e
+```
+
+If `cron` asks you to choose an editor, we recommend choosing option `2 - nano`
+
+Once you are in `nano` you will edit the `crontab` file which lets you schedule when to run certain things
+
+```
+# Edit this file to introduce tasks to be run by cron.
+#
+# Each task to run has to be defined through a single line
+# indicating with different fields when the task will be run
+# and what command to run for the task
+#
+# To define the time you can provide concrete values for
+# minute (m), hour (h), day of month (dom), month (mon),
+# and day of week (dow) or use '*' in these fields (for 'any').#
+# Notice that tasks will be started based on the cron's system
+# daemon's notion of time and timezones.
+#
+# Output of the crontab jobs (including errors) is sent through
+# email to the user the crontab file belongs to (unless redirected).
+#
+# For example, you can run a backup of all your user accounts
+# at 5 a.m every week with:
+# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
+#
+# For more information see the manual pages of crontab(5) and cron(8)
+#
+# m h dom mon dow command
+```
+
+Add the following line to the bottom of the file (make sure there is no `#` symbol as this makes the line a comment)
+
+```
+@reboot sleep 30 && python /home/pi/startup_mailer.py
+```
+
+This line tells your Pi to run `python /home/pi/startup_mailer.py` when your machine reboots. The `sleep 30` is there to give your Pi 30 seconds to wake up and load all the system resources before it emails you your IP (we have found that not having the sleep delay means the script does not send an email, probably because the Pi doesn't have an IP).
+
+Save and exit `nano` (using `Ctrl+X`, `yes`)
+
+7. Edit your `wpa_supplicant.conf` WiFi settings
+
+```shell
+sudo nano /etc/wpa_supplicant/wpa_supplicant.conf
+```
+
+Add the following lines to the top of the file, above the `DeviceFarm` settings if you would prefer it to use `RedRover` before using `DeviceFarm`
+
+```text
+ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+network={
+ ssid="RedRover"
+ key_mgmt=NONE
+}
+```
+
+You can also comment out `DeviceFarm` settings so that you only connect to `RedRover`. Put `#` before all the lines for the `DeviceFarm` config settings.
+
+```text
+#ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
+#network={
+# ssid="DeviceFarm"
+# psk="device@theFarm"
+# key_mgmt=WPA-PSK
+#}
+```
+
+(If something goes wrong, you can always reset your WiFi settings using the `wpa_supplicant.conf.bak` file in the `boot` directory.)
+
+Save and exit `nano` (`Ctrl+X`, `yes`)
+
+8. Reboot your Pi using `sudo reboot`. If everything is configured correctly, you should get an email with your IP within a minute or two.
+
+### Connecting to your Pi using the IP it has with your laptop on `RedRover` or `eduroam`
+1. Once you receive the email from your Pi, copy the IP address.
+
+**NOTE: A RedRover IP will be on 10.xxx.xxx.xxx. If you get something like 192.xxx.xxx.xxx then you are probably connected to `DeviceFarm`**
+
+2. Make sure your laptop is connected to `RedRover` or `eduroam` (`Cornell Visitor` will not work)
+
+#### On Mac/Linux
+Open your Terminal and ssh using the IP address from the email
+
+```shell
+ssh pi@xx.xx.xx.xx
+```
+
+#### On Windows
+Use the IP from the email as the location instead of `ixe[00]`. Make sure the `Port` is set to `22`.
+
+3. You can access the webpage running on port `8000` (in our examples like `helloYou`) by going to the IP address followed by port 8000 in your browser window
+
+`ex: 10.148.131.xxx:8000`
diff --git a/Lab 2/PlacingMiniPiTFTonPi.jpg b/Lab 2/PlacingMiniPiTFTonPi.jpg
new file mode 100644
index 0000000000..dfa25a7e50
Binary files /dev/null and b/Lab 2/PlacingMiniPiTFTonPi.jpg differ
diff --git a/Lab 2/README.md b/Lab 2/README.md
new file mode 100644
index 0000000000..192134e3d0
--- /dev/null
+++ b/Lab 2/README.md
@@ -0,0 +1,277 @@
+# Interactive Prototyping: The Clock of Pi
+**NAMES OF COLLABORATORS HERE**
+
+Does it feel like time is moving strangely during this semester?
+
+For our first Pi project, we will pay homage to the [timekeeping devices of old](https://en.wikipedia.org/wiki/History_of_timekeeping_devices) by making simple clocks.
+
+It is worth spending a little time thinking about how you mark time, and what would be useful in a clock of your own design.
+
+**Please indicate anyone you collaborated with on this Lab here.**
+Be generous in acknowledging their contributions! And also recognizing any other influences (e.g. from YouTube, Github, Twitter) that informed your design.
+
+## Prep
+
+Lab Prep is extra long this week. Make sure to start this early for lab on Thursday.
+
+1. ### Set up your Lab 2 Github
+
+Before the start of lab Thursday, ensure you have the latest lab content by updating your forked repository.
+
+**📖 [Follow the step-by-step guide for safely updating your fork](pull_updates/README.md)**
+
+This guide covers how to pull updates without overwriting your completed work, handle merge conflicts, and recover if something goes wrong.
+
+
+2. ### Get Kit and Inventory Parts
+Prior to the lab session on Thursday, take inventory of the kit parts that you have, and note anything that is missing:
+
+***Update your [parts list inventory](partslist.md)***
+
+3. ### Prepare your Pi for lab this week
+[Follow these instructions](prep.md) to download and burn the image for your Raspberry Pi before lab Thursday.
+
+
+
+
+## Overview
+For this assignment, you are going to
+
+A) [Connect to your Pi](#part-a)
+
+B) [Try out cli_clock.py](#part-b)
+
+C) [Set up your RGB display](#part-c)
+
+D) [Try out clock_display_demo](#part-d)
+
+E) [Modify the code to make the display your own](#part-e)
+
+F) [Make a short video of your modified barebones PiClock](#part-f)
+
+G) [Sketch and brainstorm further interactions and features you would like for your clock for Part 2.](#part-g)
+
+## The Report
+This readme.md page in your own repository should be edited to include the work you have done. You can delete everything but the headers and the sections between the \*\*\***stars**\*\*\*. Write the answers to the questions under the starred sentences. Include any material that explains what you did in this lab hub folder, and link it in the readme.
+
+Labs are due on Mondays. Make sure this page is linked to on your main class hub page.
+
+## Part A.
+### Connect to your Pi
+Just like you did in the lab prep, ssh on to your pi. Once you get there, create a Python environment (named venv) by typing the following commands.
+
+```
+ssh pi@
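+# then, on the Pi (assumed typical commands for a venv named "venv"; follow the lab prep if it differs):
+python3 -m venv venv
+source venv/bin/activate
+pip3 install -r requirements.txt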
+```
+
+The Raspberry Pi 4 has a variety of interfacing options. When you plug the Pi in, the red power LED turns on. Any time the SD card is accessed, the green LED flashes. It has standard USB and HDMI ports. Less familiar is the set of 20x2 pin headers that allows you to connect various peripherals.
+
+
+
+To learn more about any individual pin and what it is for go to [pinout.xyz](https://pinout.xyz/pinout/3v3_power) and click on the pin. Some terms may be unfamiliar but we will go over the relevant ones as they come up.
+
+### Hardware (you have already done this in the prep)
+
+From your kit take out the display and the [Raspberry Pi 5](https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.raspberrypi.com%2Fproducts%2Fraspberry-pi-5%2F&psig=AOvVaw330s4wIQWfHou2Vk3-0jUN&ust=1757611779758000&source=images&cd=vfe&opi=89978449&ved=0CBMQjRxqFwoTCPi1-5_czo8DFQAAAAAdAAAAABAE)
+
+Line up the screen and press it on the headers. The hole in the screen should match up with the hole on the raspberry pi.
+
+
+
+
+
+
+3. Click the gear icon on the bottom right to open Advanced Settings. In here, you need to make two changes:
+- change the "hostname" to something unique
+- ~set the password for user "pi" to something unique to you that you can remember~ Albert says, change the password after you ssh in.
+- do not change any of the other settings (username pi and network should stay as they are)
+
+4. Eject or unmount the microSD card reader, and then remove the SD card from the reader and reinsert it into SD card slot on the Pi: it is located on the bottom (silver rectangle on the right).
+
+
+
+5. Take and connect the Adafruit MiniPiTFT to your Pi with the configuration shown below; the MiniPiTFT should be on the top left corner of your Pi.
+
+
+
+6. Boot the Pi by connecting it to a power source with USB-C connector.
+
+### Setting up your Pi to run in headless mode
+
+#### Connecting to your Pi remotely
+
+Unlike your laptop, the Pi doesn't come with its own keyboard or mouse. While you could plug in a monitor, keyboard, and mouse we will be connecting to your Pi over [SSH](https://en.wikipedia.org/wiki/Secure_Shell). You can do this in [Mac Terminal](https://blog.teamtreehouse.com/introduction-to-the-mac-os-x-command-line) or [Windows 10 SSH Client](https://docs.microsoft.com/en-us/windows/terminal/tutorials/ssh).
+
+*Note: This setup assumes you boot your Raspberry Pi for the first time when on campus or in The House. If you have a screen, mouse, and keyboard, you can edit /etc/wpa_supplicant/wpa_supplicant.conf on the Pi right away to make it connect to your home network.*
+
+
+1. When you boot up your Pi, the MiniPiTFT should have the following information shown:
+
+ ````
+ IP: xxx.xxx.xxx.xxx
+ NET: [YourWifiNetwork]
+ MAC: xx:xx:xx:xx:xx:xx
+ ````
+
+ The IP address is what you will need to SSH into your Pi later through the same network. The media access control address (MAC address) is a unique identifier assigned to a network interface controller; you will need it later for registering the device if you are using the Cornell network (e.g. RedRover). The NET shows which WiFi network your Pi is connected to.
+
+ For MAC address: If you are planning to use Cornell network (e.g. RedRover and eduroam), you will have to register the device (your Pi) to the Cornell System to get it online. Please follow the instructions [here](https://it.cornell.edu/wifi/register-device-doesnt-have-browser) from Cornell. Register using the MAC address from your Pi's screen. If you are using the House network, you will need to register the device (your Pi) through [whitesky](https://myaccount.wscmdu.com/myaccount/devices). You might need to wait for a few minutes for your Pi to actually get online after registering it.
+
+
+
+
+
+
+3. Verify your Pi is online. In the terminal of your laptop, type `ping
+
+Choose '1. System Options' and then 'S3 Password'; the terminal will then ask you to enter your new password. Again, for security the terminal will not show what you type, so do not worry about it and just make sure you type the correct new password twice. After you change the password successfully, you will have to use the new password the next time you SSH into your Pi.
+
+### Refresh your knowledge of command line interfaces:
+
+The command line/terminal is a powerful way to interact with your computer without using a Graphical User Interface (GUI). When you SSH onto your Pi, you get a prompt where you can enter commands. In your terminal there is a shell; there are many shells, but for this class we will use one of the most common: **bash**.
+
+ ```
+ pi@raspberrypi:~ $ echo $SHELL
+ /bin/bash
+ pi@raspberrypi:~ $
+ ```
+In the code above we've typed `echo $SHELL`. The `echo` tells it to print something to the screen. You could try typing `echo 'hello'` to see how that works for strings. The `$` at the front of `$SHELL` tells bash we are referring to a variable. In this case it is a variable the OS is using to store the shell program. In the folder `/bin` is a program called bash, which we are currently using. The up arrow will show the most recent command.
+
+
+
+#### Navigation in the command line
+
+There are many commands you can use in the command line; they can take a variety of options that change how they are used. You can look these up online to learn more. Many commands have a manual page with documentation that you can see directly in the terminal by typing `man [command]`. For example:
+
+ ```shell
+ pi@raspberrypi:~ $ man echo
+ ECHO(1) User Commands ECHO(1)
+
+ NAME
+ echo - display a line of text
+ SYNOPSIS
+ echo [SHORT-OPTION]... [STRING]...
+ echo LONG-OPTION
+ DESCRIPTION
+ Echo the STRING(s) to standard output.
+ -n do not output the trailing newline
+ -e enable interpretation of backslash escapes
+ -E disable interpretation of backslash escapes (default)
+ --help display this help and exit
+ --version
+ Manual page echo(1) line 1 (press h for help or q to quit)
+ ```
+
+
+These are some useful commands. Read the manual pages for advanced usage.
+
+* `pwd` - print working directory, tells us where on the computer we are
+* `ls` - list the things in the current directory.
+* `cd` - change directory. This lets you move to another folder on your machine.
+* `mkdir` - make directory. You can create directories with this command
+* `cp` - copy a file. You can copy from one place to any other place
+* `mv` - move a file, also used to rename a file
+* `rm` - delete a file. To delete a folder you need the recursive flag `rm -r [folder]`
+* `cat` - view a file
+* `nano` - this is a text editor (there are many) that will let you edit files in terminal.
+
+There is plenty more to learn about using the terminal to navigate a computer but this should give a good start for getting around the raspberry pi.
+
+
+### Using VNC to see your Pi desktop
+Another convenient way to remotely connect to your Pi is using VNC (Virtual Network Computing); it is essentially a remote desktop login. The easiest client to use is [VNC Connect](https://www.realvnc.com/en/connect/download/viewer/). Download and install it. Once that's done, type the IP address of your Pi in the text-box at the top.
+
+
+After that a login window should appear, use your normal logins (originally: Account=pi, Password=raspberry).
+
+
+You might want to change a few settings to improve the VNC experience such as changing the display resolution.
+To change the resolution, run the command `sudo raspi-config`, navigate to Display Options > VNC Resolution, and choose an option.
+See here for more troubleshooting [realvnc.com Pi Setup](https://help.realvnc.com/hc/en-us/articles/360002249917-VNC-Connect-and-Raspberry-Pi).
+
+
+At that point the normal RPi desktop should appear and you can start and stop programs from here.
+
+### Setting up WendyTA - Your AI Teaching Assistant
+
+For this course, we have **WendyTA**, an AI Teaching Assistant that can help you with coding, debugging, brainstorming, and learning. WendyTA is automatically activated through GitHub Copilot Chat when working in this repository.
+
+**📖 Learn more about WendyTA**: [WendyTA Documentation](https://github.com/IRL-CT/Interactive-Lab-Hub/tree/Fall2025/WendyTA)
+
+#### Recommended Setup Options:
+
+1. **VS Code Server on your laptop** (Recommended): Use VS Code's Remote SSH extension to connect to your Pi and code directly with WendyTA available.
+
+2. **VNC + VS Code on Pi**: Use VNC to access the Pi desktop and install VS Code there with GitHub Copilot extension.
+
+**Setup Instructions**: [WendyTA Copilot Setup Guide](https://github.com/IRL-CT/Interactive-Lab-Hub/blob/Fall2025/WendyTA/setup/copilot-setup.md)
+
+✨ **Note**: WendyTA works through both SSH/VS Code Server and VNC connections, so choose the method that works best for your setup!
+
+
diff --git a/Lab 2/proximity.py b/Lab 2/proximity.py
new file mode 100644
index 0000000000..7de0ffa4cf
--- /dev/null
+++ b/Lab 2/proximity.py
@@ -0,0 +1,13 @@
+import board
+import busio
+import adafruit_apds9960.apds9960
+import time
+i2c = busio.I2C(board.SCL, board.SDA)
+sensor = adafruit_apds9960.apds9960.APDS9960(i2c)
+
+sensor.enable_proximity = True
+
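+# Print the proximity reading five times a second; larger values generally mean a closer object.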
+while True:
+    prox = sensor.proximity
+    print(prox)
+    time.sleep(0.2)
\ No newline at end of file
diff --git a/Lab 2/pull_updates/README.md b/Lab 2/pull_updates/README.md
index b1b6c47a06..3aaa08f78a 100644
--- a/Lab 2/pull_updates/README.md
+++ b/Lab 2/pull_updates/README.md
@@ -45,7 +45,37 @@ If you see merge conflicts:
2. Click **"Resolve conflicts"** on GitHub's web interface
3. Or pull the changes locally and resolve conflicts in your editor
-## Method 2: Command Line Approach
+## Method 2: Using Pull Requests Within Your Fork
+
+**Alternative approach**: Create a pull request **within your own repository** to pull updates from the course repo.
+
+### Step-by-Step Process:
+1. **Go to your forked repository** on GitHub (`your-username/Interactive-Lab-Hub`)
+2. **Click on "Pull requests"** tab
+3. **Click "New pull request"** button
+4. **Set the repositories correctly**:
+ - **Base repository**: `your-username/Interactive-Lab-Hub` (your fork)
+ - **Head repository**: `IRL-CT/Interactive-Lab-Hub` (the course repo)
+5. **If needed**: Click the blue **"compare across forks"** link to see cross-fork options
+
+
+
+1. **Make sure branches match**: Usually both should be `Fall2025` (the current semester)
+2. **Click "Create pull request"**
+3. **Add a title**: e.g., "Pull course updates - Lab 2"
+4. **Click "Create pull request"** again
+5. **Click "Merge pull request"** to complete the update
+6. **Click "Confirm merge"**
+
+### When to Use This Method:
+- When the "Sync fork" button isn't available
+- When you prefer more control over the merge process
+- When you want to review changes before merging
+- When working with the traditional GitHub workflow
+
+**Reference**: This follows the process described in [the original course documentation](https://github.com/IRL-CT/Developing-and-Designing-Interactive-Devices/blob/2023Fall/readings/Submitting%20Labs.md)
+
+## Method 3: Command Line Approach
### Step 1: Add Upstream Remote (One-time setup)
```bash
diff --git a/Lab 2/pull_updates/pull_into_own_repo_request.png b/Lab 2/pull_updates/pull_into_own_repo_request.png
new file mode 100644
index 0000000000..157ce337e8
Binary files /dev/null and b/Lab 2/pull_updates/pull_into_own_repo_request.png differ
diff --git a/Lab 2/red.jpg b/Lab 2/red.jpg
new file mode 100644
index 0000000000..53b66834c3
Binary files /dev/null and b/Lab 2/red.jpg differ
diff --git a/Lab 2/requirements.txt b/Lab 2/requirements.txt
new file mode 100644
index 0000000000..0ff3f6d3d6
--- /dev/null
+++ b/Lab 2/requirements.txt
@@ -0,0 +1,20 @@
+Adafruit-Blinka==8.64.0
+adafruit-circuitpython-apds9960==3.1.16
+adafruit-circuitpython-busdevice==5.2.13
+adafruit-circuitpython-connectionmanager==3.1.5
+adafruit-circuitpython-register==1.10.4
+adafruit-circuitpython-requests==4.1.13
+adafruit-circuitpython-rgb-display==3.14.1
+adafruit-circuitpython-typing==1.12.1
+Adafruit-PlatformDetect==3.82.0
+Adafruit-PureIO==1.1.11
+binho-host-adapter==0.1.6
+lgpio==0.2.2.0
+pillow==11.3.0
+pip-chill==1.0.3
+pyftdi==0.57.1
+pyserial==3.5
+pyusb==1.3.1
+sysv-ipc==1.1.0
+typing_extensions==4.15.0
+webcolors==24.11.1
diff --git a/Lab 2/screen_boot_script.py b/Lab 2/screen_boot_script.py
new file mode 100644
index 0000000000..224ecceec6
--- /dev/null
+++ b/Lab 2/screen_boot_script.py
@@ -0,0 +1,134 @@
+# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
+# SPDX-License-Identifier: MIT
+
+# -*- coding: utf-8 -*-
+
+import time
+import subprocess
+import digitalio
+import board
+from PIL import Image, ImageDraw, ImageFont
+from adafruit_rgb_display import st7789
+
+
+# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
+cs_pin = digitalio.DigitalInOut(board.D5)
+dc_pin = digitalio.DigitalInOut(board.D25)
+reset_pin = None
+
+# Config for display baudrate (default max is 24mhz):
+BAUDRATE = 64000000
+
+# Setup SPI bus using hardware SPI:
+spi = board.SPI()
+
+# Create the ST7789 display:
+disp = st7789.ST7789(
+ spi,
+ cs=cs_pin,
+ dc=dc_pin,
+ rst=reset_pin,
+ baudrate=BAUDRATE,
+ width=135,
+ height=240,
+ x_offset=53,
+ y_offset=40,
+)
+
+# Create blank image for drawing.
+# Make sure to create image with mode 'RGB' for full color.
+height = disp.width # we swap height/width to rotate it to landscape!
+width = disp.height
+image = Image.new("RGB", (width, height))
+rotation = 90
+
+# Get drawing object to draw on image.
+draw = ImageDraw.Draw(image)
+
+# Draw a black filled box to clear the image.
+draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
+disp.image(image, rotation)
+# Draw some shapes.
+# First define some constants to allow easy resizing of shapes.
+padding = -2
+top = padding
+bottom = height - padding
+# Move left to right keeping track of the current x position for drawing shapes.
+x = 0
+
+
+# Alternatively load a TTF font. Make sure the .ttf font file is in the
+# same directory as the python script!
+# Some other nice fonts to try: http://www.dafont.com/bitmap.php
+font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24)
+
+# Turn on the backlight
+backlight = digitalio.DigitalInOut(board.D22)
+backlight.switch_to_output()
+backlight.value = True
+
+# Button configuration
+buttonA = digitalio.DigitalInOut(board.D23)
+buttonB = digitalio.DigitalInOut(board.D24)
+buttonA.switch_to_input()
+buttonB.switch_to_input()
+
+mac_scroll_position = 0
+cpu_mem_disk_scroll_position = 0
+
+while True:
+ # Draw a black filled box to clear the image.
+ draw.rectangle((0, 0, width, height), outline=0, fill=0)
+
+ # Check button presses
+ if not buttonA.value: # Button A pressed (power off)
+ draw.text((0, 0), "Shutdown " * 10, font=font, fill="#FF0000")
+ disp.image(image, rotation)
+ subprocess.run(['sudo', 'poweroff'])
+ elif not buttonB.value: # Button B pressed (restart)
+ draw.text((0, 0), "Reboot " * 10, font=font, fill="#0000FF")
+ disp.image(image, rotation)
+ subprocess.run(['sudo', 'reboot'])
+
+ y = top
+
+ # IP Address
+ cmd = "hostname -I | cut -d' ' -f1"
+ IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8")
+ draw.text((x, y), IP, font=font, fill="#FFFFFF")
+ y += font.size
+
+ # Network Name
+ try:
+ cmd = "iwgetid -r"
+ Network = "Net: " + subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
+ except subprocess.CalledProcessError:
+ Network = "Net: Error fetching network name"
+
+
+ draw.text((x, y), Network, font=font, fill="#FFFFFF")
+ y += font.size
+
+
+ # MAC Address
+ MAC = "MAC: " + subprocess.check_output("cat /sys/class/net/wlan0/address", shell=True).decode("utf-8").strip()
+ draw.text((x - mac_scroll_position, y), MAC, font=font, fill="#FFFFFF")
+ y += font.size
+ mac_scroll_position = (mac_scroll_position + 5) % font.size
+
+ # CPU Usage, Memory and Disk Usage
+ CPU = subprocess.check_output("top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'", shell=True).decode("utf-8")
+ MemUsage = subprocess.check_output("free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'", shell=True).decode("utf-8")
+ Disk = subprocess.check_output('df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\'', shell=True).decode("utf-8")
+ CPUMemDisk = CPU + " | " + MemUsage + " | " + Disk
+ draw.text((x - cpu_mem_disk_scroll_position, y), CPUMemDisk, font=font, fill="#00FF00")
+ y += font.size
+ cpu_mem_disk_scroll_position = (cpu_mem_disk_scroll_position + 5) % font.size
+
+ # CPU Temperature
+ Temp = subprocess.check_output("cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'", shell=True).decode("utf-8")
+ draw.text((x, y), Temp, font=font, fill="#FF00FF")
+
+ # Display image.
+ disp.image(image, rotation)
+ time.sleep(0.2) # Adjust the speed of the scrolling text
\ No newline at end of file
diff --git a/Lab 2/screen_clock.py b/Lab 2/screen_clock.py
new file mode 100644
index 0000000000..bbb5289a00
--- /dev/null
+++ b/Lab 2/screen_clock.py
@@ -0,0 +1,204 @@
+import time
+import subprocess
+import digitalio
+import board
+from PIL import Image, ImageDraw, ImageFont
+import adafruit_rgb_display.st7789 as st7789
+from adafruit_rgb_display.rgb import color565
+import colorsys
+import calendar
+import math
+
+# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
+cs_pin = digitalio.DigitalInOut(board.D5)
+dc_pin = digitalio.DigitalInOut(board.D25)
+reset_pin = None
+
+# Config for display baudrate (default max is 24mhz):
+BAUDRATE = 64000000
+
+# Setup SPI bus using hardware SPI:
+spi = board.SPI()
+
+# Create the ST7789 display:
+disp = st7789.ST7789(
+ spi,
+ cs=cs_pin,
+ dc=dc_pin,
+ rst=reset_pin,
+ baudrate=BAUDRATE,
+ width=135,
+ height=240,
+ x_offset=53,
+ y_offset=40,
+)
+
+# Button setup
+btnA = digitalio.DigitalInOut(board.D23)
+btnB = digitalio.DigitalInOut(board.D24)
+
+# Set buttons as inputs, with a pull-up resistor to avoid floating inputs
+btnA.direction = digitalio.Direction.INPUT
+btnB.direction = digitalio.Direction.INPUT
+btnA.pull = digitalio.Pull.UP
+btnB.pull = digitalio.Pull.UP
+
+# Create blank image for drawing.
+# Make sure to create image with mode 'RGB' for full color.
+height = disp.width # we swap height/width to rotate it to landscape!
+width = disp.height
+image = Image.new("RGB", (width, height))
+rotation = 90
+
+# Get drawing object to draw on image.
+draw = ImageDraw.Draw(image)
+
+# Draw a black filled box to clear the image.
+draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
+disp.image(image, rotation)
+
+# Font for the X
+font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 10)
+
+# Font for the axis labels
+label_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 8)
+
+# A font for the key and title
+key_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 12)
+
+# Turn on the backlight
+backlight = digitalio.DigitalInOut(board.D22)
+backlight.switch_to_output()
+backlight.value = True
+
+def draw_axes_clock():
+ # Clear screen with white background
+ draw.rectangle((0, 0, width, height), outline=0, fill=(255, 255, 255))
+
+ # Borders and padding
+ border_width = 2
+ padding_left = 25
+ padding_right = 15
+ padding_top = 15
+ padding_bottom = 15
+
+ # Border around the entire display area
+ draw.rectangle((0, 0, width - 1, height - 1), outline=color565(255, 0, 0), width=border_width)
+
+ # Plotting area (within padding)
+ plot_left = padding_left
+ plot_top = padding_top
+ plot_right = width - padding_right
+ plot_bottom = height - padding_bottom
+
+ plotting_width = plot_right - plot_left
+ plotting_height = plot_bottom - plot_top
+
+ # Axis colors and thickness
+ axis_color = color565(0, 0, 0) # Black
+ line_thickness = 1
+
+ # Horizontal axis (minutes)
+ draw.line((plot_left, plot_bottom, plot_right, plot_bottom), fill=axis_color, width=line_thickness)
+
+ # Vertical axis (hours)
+ draw.line((plot_left, plot_bottom, plot_left, plot_top), fill=axis_color, width=line_thickness)
+
+ # Markers and labels to the axes
+ marker_size = 3
+ text_offset = 2
+
+ # Minute markers (every 15 minutes)
+ for minute_val in range(0, 61, 15):
+ x_pos = plot_left + int(minute_val / 60 * plotting_width)
+
+ draw.line((x_pos, plot_bottom - marker_size, x_pos, plot_bottom + marker_size), fill=axis_color, width=line_thickness)
+
+ text_bbox = draw.textbbox((0,0), str(minute_val), font=label_font)
+ text_width = text_bbox[2] - text_bbox[0]
+
+ if minute_val == 60:
+ draw.text((x_pos - text_width, plot_bottom + text_offset), str(minute_val), font=label_font, fill=axis_color)
+ else:
+ draw.text((x_pos - text_width/2, plot_bottom + text_offset), str(minute_val), font=label_font, fill=axis_color)
+
+ # Hour markers (every 3 hours)
+ for hour_val in range(0, 13, 3):
+ y_pos = plot_bottom - int(hour_val / 12 * plotting_height)
+
+ draw.line((plot_left - marker_size, y_pos, plot_left + marker_size, y_pos), fill=axis_color, width=line_thickness)
+
+ hour_label = str(hour_val)
+ if hour_val == 12:
+ hour_label = "12"
+ elif hour_val == 0:
+ hour_label = "0"
+
+ text_bbox = draw.textbbox((0,0), hour_label, font=label_font)
+ text_width = text_bbox[2] - text_bbox[0]
+ text_height = text_bbox[3] - text_bbox[1]
+
+ draw.text((plot_left - marker_size - text_offset - text_width, y_pos - text_height/2), hour_label, font=label_font, fill=axis_color)
+
+ # Draw the arrowheads
+ arrow_size = 5
+ # Right arrow on X-axis (minutes)
+ draw.polygon([(plot_right, plot_bottom),
+ (plot_right - arrow_size, plot_bottom - arrow_size),
+ (plot_right - arrow_size, plot_bottom + arrow_size)], fill=axis_color)
+ # Top arrow on Y-axis (hours)
+ draw.polygon([(plot_left, plot_top),
+ (plot_left - arrow_size, plot_top + arrow_size),
+ (plot_left + arrow_size, plot_top + arrow_size)], fill=axis_color)
+
+ # Get the current time
+ current_time = time.localtime()
+ current_hour_24 = current_time.tm_hour
+ current_minute = current_time.tm_min
+
+ # Convert to 12-hour format (1-12)
+ hour_12 = current_hour_24 % 12
+ if hour_12 == 0:
+ hour_12 = 12
+
+ # Map minutes (0-59) to X-coordinate
+ x_plot = plot_left + int(current_minute / 59 * plotting_width)
+
+ # Map hours (1-12) to Y-coordinate
+ # Y-axis is inverted: 1 at bottom, 12 at top
+ y_plot = plot_bottom - int((hour_12 - 1) / 11 * plotting_height)
+
+ # Draw 'X' at the calculated position
+ text_bbox_x = draw.textbbox((0,0), 'X', font=font)
+ x_text_offset = (text_bbox_x[2] - text_bbox_x[0]) / 2
+ y_text_offset = (text_bbox_x[3] - text_bbox_x[1]) / 2
+ draw.text((x_plot - x_text_offset, y_plot - y_text_offset), 'X', font=font, fill="#FF0000")
+
+def draw_key_screen():
+ # Clear screen with black background
+ draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
+
+ # Define text/ key
+ title = "GRAPH CLOCK"
+ key_text = "X = Time"
+ hours_key = "Y-Axis = Hours"
+ minutes_key = "X-Axis = Minutes"
+
+ # Title (at the top)
+ draw.text((width/2 - 30, 20), title, font=key_font, fill="#FFFFFF")
+
+ # Key (in the center)
+ draw.text((width/2 - 30, height/2 - 20), key_text, font=key_font, fill="#FF0000")
+ draw.text((width/2 - 30, height/2), hours_key, font=key_font, fill="#FFFFFF")
+ draw.text((width/2 - 30, height/2 + 20), minutes_key, font=key_font, fill="#FFFFFF")
+
+ disp.image(image, rotation)
+
+while True:
+ if not btnB.value: # Button B is pressed when its value is False
+ draw_key_screen()
+ else:
+ draw_axes_clock()
+ disp.image(image, rotation)
+
+ time.sleep(0.1)
diff --git a/Lab 2/screen_test.py b/Lab 2/screen_test.py
new file mode 100644
index 0000000000..24c977231f
--- /dev/null
+++ b/Lab 2/screen_test.py
@@ -0,0 +1,87 @@
+# rpi5_minipitft_st7789.py
+# Works on Raspberry Pi 5 with Adafruit Blinka backend (lgpio) and SPI enabled.
+# Wiring change: connect the display's CS to GPIO5 (pin 29), not CE0.
+
+import time
+import digitalio
+import board
+
+from adafruit_rgb_display.rgb import color565
+import adafruit_rgb_display.st7789 as st7789
+import webcolors
+
+# ---------------------------
+# SPI + Display configuration
+# ---------------------------
+# Use a FREE GPIO for CS to avoid conflicts with the SPI driver owning CE0/CE1.
+cs_pin = digitalio.DigitalInOut(board.D5) # GPIO5 (PIN 29) <-- wire display CS here
+dc_pin = digitalio.DigitalInOut(board.D25) # GPIO25 (PIN 22)
+reset_pin = None
+
+# 64 MHz works if the wiring is short and clean; drop to 24_000_000 if the display glitches.
+BAUDRATE = 64000000
+
+# Create SPI object on SPI0 (spidev0.* must exist; enable SPI in raspi-config).
+spi = board.SPI()
+
+# For Adafruit mini PiTFT 1.14" (240x135) ST7789 use width=135, height=240, x/y offsets below.
+# If you actually have a 240x240 panel, set width=240, height=240 and x_offset=y_offset=0.
+display = st7789.ST7789(
+ spi,
+ cs=cs_pin,
+ dc=dc_pin,
+ rst=reset_pin,
+ baudrate=BAUDRATE,
+ width=135,
+ height=240,
+ x_offset=53,
+ y_offset=40,
+ # rotation=0 # uncomment/change if your screen orientation is off
+)
+
+# ---------------------------
+# Backlight + Buttons
+# ---------------------------
+backlight = digitalio.DigitalInOut(board.D22) # GPIO22 (PIN 15)
+backlight.switch_to_output(value=True)
+
+buttonA = digitalio.DigitalInOut(board.D23) # GPIO23 (PIN 16)
+buttonB = digitalio.DigitalInOut(board.D24) # GPIO24 (PIN 18)
+# Use internal pull-ups; buttons then read LOW when pressed.
+buttonA.switch_to_input(pull=digitalio.Pull.UP)
+buttonB.switch_to_input(pull=digitalio.Pull.UP)
+
+# ---------------------------
+# Ask user for a color
+# ---------------------------
+screenColor = None
+while not screenColor:
+ try:
+ name = input('Type the name of a color and hit enter: ')
+ rgb = webcolors.name_to_rgb(name)
+ screenColor = color565(rgb.red, rgb.green, rgb.blue)
+ except ValueError:
+ print("whoops I don't know that one")
+
+# ---------------------------
+# Main loop
+# ---------------------------
+print("Press A for WHITE, B for your color, both to turn backlight OFF.")
+while True:
+ # Buttons are active-LOW because of pull-ups
+ a_pressed = (buttonA.value == False)
+ b_pressed = (buttonB.value == False)
+
+ if a_pressed and b_pressed:
+ backlight.value = False # turn off backlight
+ else:
+ backlight.value = True # turn on backlight
+
+ if b_pressed and not a_pressed:
+ display.fill(screenColor) # user's color
+ elif a_pressed and not b_pressed:
+ display.fill(color565(255, 255, 255)) # white
+ else:
+ display.fill(color565(0, 255, 0)) # green
+
+ time.sleep(0.02) # small debounce / CPU break
diff --git a/Lab 2/stats.py b/Lab 2/stats.py
new file mode 100644
index 0000000000..010cb8410f
--- /dev/null
+++ b/Lab 2/stats.py
@@ -0,0 +1,92 @@
+import time
+import subprocess
+import digitalio
+import board
+from PIL import Image, ImageDraw, ImageFont
+import adafruit_rgb_display.st7789 as st7789
+
+
+# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
+cs_pin = digitalio.DigitalInOut(board.D5)
+dc_pin = digitalio.DigitalInOut(board.D25)
+reset_pin = None
+
+# Config for display baudrate (default max is 24mhz):
+BAUDRATE = 64000000
+
+# Setup SPI bus using hardware SPI:
+spi = board.SPI()
+
+# Create the ST7789 display:
+disp = st7789.ST7789(
+ spi,
+ cs=cs_pin,
+ dc=dc_pin,
+ rst=reset_pin,
+ baudrate=BAUDRATE,
+ width=135,
+ height=240,
+ x_offset=53,
+ y_offset=40,
+)
+
+# Create blank image for drawing.
+# Make sure to create image with mode 'RGB' for full color.
+height = disp.width # we swap height/width to rotate it to landscape!
+width = disp.height
+image = Image.new("RGB", (width, height))
+rotation = 90
+
+# Get drawing object to draw on image.
+draw = ImageDraw.Draw(image)
+
+# Draw a black filled box to clear the image.
+draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
+disp.image(image, rotation)
+# Draw some shapes.
+# First define some constants to allow easy resizing of shapes.
+padding = -2
+top = padding
+bottom = height - padding
+# Move left to right keeping track of the current x position for drawing shapes.
+x = 0
+
+
+# Alternatively load a TTF font. Make sure the .ttf font file is in the
+# same directory as the python script!
+# Some other nice fonts to try: http://www.dafont.com/bitmap.php
+font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24)
+
+# Turn on the backlight
+backlight = digitalio.DigitalInOut(board.D22)
+backlight.switch_to_output()
+backlight.value = True
+
+while True:
+ # Draw a black filled box to clear the image.
+ draw.rectangle((0, 0, width, height), outline=0, fill=0)
+
+ # Shell scripts for system monitoring from here:
+ # https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
+ cmd = "hostname -I | cut -d' ' -f1"
+ IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8")
+ cmd = "curl -s wttr.in/?format=2"
+ WTTR = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ cmd = 'curl -s ils.rate.sx/1USD | cut -c1-6'
+ USD = "$1USD = ₪" + subprocess.check_output(cmd, shell=True).decode("utf-8") + "ILS"
+ cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'"
+ Temp = subprocess.check_output(cmd, shell=True).decode("utf-8")
+
+ # Write four lines of text.
+ y = top
+ draw.text((x, y), IP, font=font, fill="#FFFFFF")
+ # y += font.getsize(IP)[1]
+ y += draw.textbbox((0,0), IP, font=font)[3]
+ draw.text((x, y), WTTR, font=font, fill="#FFFF00")
+ y += draw.textbbox((0,0), WTTR, font=font)[3]
+ draw.text((x, y), USD, font=font, fill="#0000FF")
+ y += draw.textbbox((0,0), USD, font=font)[3]
+ # Draw the CPU temperature as the fourth line (it was fetched above but never shown).
+ draw.text((x, y), Temp, font=font, fill="#FF00FF")
+
+ # Display image.
+ disp.image(image, rotation)
+ time.sleep(0.1)
diff --git a/Lab 3/Deliverables/IMG_2512.mov b/Lab 3/Deliverables/IMG_2512.mov
new file mode 100644
index 0000000000..ba2ed8303b
Binary files /dev/null and b/Lab 3/Deliverables/IMG_2512.mov differ
diff --git a/Lab 3/Deliverables/Ollama Documentation.pdf b/Lab 3/Deliverables/Ollama Documentation.pdf
new file mode 100644
index 0000000000..d479bf2307
Binary files /dev/null and b/Lab 3/Deliverables/Ollama Documentation.pdf differ
diff --git a/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png b/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png
new file mode 100644
index 0000000000..ad4f83b412
Binary files /dev/null and b/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png differ
diff --git a/Lab 3/Deliverables/WordBot StoryBoard.jpg b/Lab 3/Deliverables/WordBot StoryBoard.jpg
new file mode 100644
index 0000000000..6017a1245d
Binary files /dev/null and b/Lab 3/Deliverables/WordBot StoryBoard.jpg differ
diff --git a/Lab 3/Deliverables/Wordbot Script.pdf b/Lab 3/Deliverables/Wordbot Script.pdf
new file mode 100644
index 0000000000..33129c837b
Binary files /dev/null and b/Lab 3/Deliverables/Wordbot Script.pdf differ
diff --git a/Lab 3/Deliverables/final_voice_assistant.py b/Lab 3/Deliverables/final_voice_assistant.py
new file mode 100644
index 0000000000..16b24406a3
--- /dev/null
+++ b/Lab 3/Deliverables/final_voice_assistant.py
@@ -0,0 +1,93 @@
+# ollama_voice_assistant.py
+
+import speech_recognition as sr
+import requests
+import os
+import time
+
+# --- Configuration ---
+MICROPHONE_INDEX = 2
+MODEL_NAME = "phi3:mini"
+OLLAMA_URL = "http://localhost:11434/api/generate"
+
+ENERGY_THRESHOLD = 150
+
+def speak(text):
+ """Uses the espeak command line tool for Text-to-Speech with better parameters."""
+
+ text = text.replace("'", "'\\''")
+ # -v en+f3: Female English voice | -s 150: Speed 150 WPM | -k 15: Pitch/Inflection
+ print(f"AI Speaking: {text}")
+ os.system(f"espeak -v en+f3 -s 150 -k 15 '{text}' 2>/dev/null")
+
+def transcribe_speech():
+ """Listens for user input and converts it to text."""
+ r = sr.Recognizer()
+ try:
+ with sr.Microphone(device_index=MICROPHONE_INDEX) as source:
+ r.adjust_for_ambient_noise(source)
+ r.energy_threshold = ENERGY_THRESHOLD
+
+ print("\nListening... Speak now.")
+ speak("Ready. Ask me anything.")
+
+
+ time.sleep(0.5)
+
+ audio = r.listen(source, timeout=8, phrase_time_limit=15)
+
+ except Exception as e:
+ print(f"Microphone error: {e}. Check MICROPHONE_INDEX ({MICROPHONE_INDEX}).")
+ speak("I am having trouble accessing the microphone.")
+ return None
+
+ try:
+ print("Transcribing via Google Speech Recognition...")
+ text = r.recognize_google(audio)
+ print(f"User Said: {text}")
+ return text
+ except sr.UnknownValueError:
+ print("Could not understand audio.")
+ speak("I didn't catch that. Could you repeat it?")
+ return None
+ except sr.RequestError as e:
+ print(f"Speech recognition service error: {e}")
+ speak("My transcription service is currently unavailable.")
+ return None
+
+def ask_ai(question):
+ """Sends the question to the local Ollama model."""
+ print("Sending request to Ollama...")
+ try:
+ # Long timeout (120 seconds) for the RPi's slow processing
+ response = requests.post(
+ OLLAMA_URL,
+ json={"model": MODEL_NAME, "prompt": question, "stream": False},
+ timeout=120
+ )
+ response.raise_for_status()
+ return response.json().get('response', 'No response received from the model.')
+ except requests.exceptions.RequestException as e:
+ print(f"Error communicating with Ollama: {e}")
+ return "I seem to be having trouble connecting to the AI model on port 11434."
+
+def main_assistant_loop():
+ """Main loop for the voice assistant."""
+ speak("Voice assistant is active. Say 'stop' to quit.")
+ while True:
+ user_text = transcribe_speech()
+
+ if user_text:
+ if "stop" in user_text.lower() or "exit" in user_text.lower() or "quit" in user_text.lower():
+ speak("Goodbye.")
+ print("Exiting assistant.")
+ break
+
+ ai_response = ask_ai(user_text)
+
+ if ai_response:
+ print(f"AI Response: {ai_response}")
+ speak(ai_response)
+
+if __name__ == "__main__":
+ main_assistant_loop()
diff --git a/Lab 3/Deliverables/txt b/Lab 3/Deliverables/txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/Lab 3/Deliverables/txt
@@ -0,0 +1 @@
+
diff --git a/Lab 3/README.md b/Lab 3/README.md
new file mode 100644
index 0000000000..5fba1e6484
--- /dev/null
+++ b/Lab 3/README.md
@@ -0,0 +1,328 @@
+# Chatterboxes
+**NAMES OF COLLABORATORS HERE**
+[Lab 3 intro video](https://www.youtube.com/embed/Q8FWzLMobx0?start=19)
+
+In this lab, we want you to design interaction with a speech-enabled device--something that listens and talks to you. This device can do anything *but* control lights (since we already did that in Lab 1). First, we want you to storyboard what you imagine the conversational interaction to be like. Then, you will use wizarding techniques to elicit examples of what people might say, ask, or respond. We then want you to use the examples collected from at least two other people to inform the redesign of the device.
+
+We will focus on **audio** as the main modality for interaction to start; these general techniques can be extended to **video**, **haptics** or other interactive mechanisms in the second part of the Lab.
+
+## Prep for Part 1: Get the Latest Content and Pick up Additional Parts
+
+Please check instructions in [prep.md](prep.md) and complete the setup before class on Wednesday, Sept 23rd.
+
+### Pick up Web Camera If You Don't Have One
+
+Students who have not already received a web camera will receive their [Logitech C270 Webcam](https://www.amazon.com/Logitech-Desktop-Widescreen-Calling-Recording/dp/B004FHO5Y6/ref=sr_1_3?crid=W5QN79TK8JM7&dib=eyJ2IjoiMSJ9.FB-davgIQ_ciWNvY6RK4yckjgOCrvOWOGAG4IFaH0fczv-OIDHpR7rVTU8xj1iIbn_Aiowl9xMdeQxceQ6AT0Z8Rr5ZP1RocU6X8QSbkeJ4Zs5TYqa4a3C_cnfhZ7_ViooQU20IWibZqkBroF2Hja2xZXoTqZFI8e5YnF_2C0Bn7vtBGpapOYIGCeQoXqnV81r2HypQNUzFQbGPh7VqjqDbzmUoloFA2-QPLa5lOctA.L5ztl0wO7LqzxrIqDku9f96L9QrzYCMftU_YeTEJpGA&dib_tag=se&keywords=webcam%2Bc270&qid=1758416854&sprefix=webcam%2Bc270%2Caps%2C125&sr=8-3&th=1) and bluetooth speaker on Wednesday at the beginning of lab. If you cannot make it to class this week, please contact the TAs to ensure you get these.
+
+### Get the Latest Content
+
+As always, pull updates from the class Interactive-Lab-Hub to both your Pi and your own GitHub repo. There are 2 ways you can do so:
+
+**\[recommended\]** Option 1: On the Pi, `cd` to your `Interactive-Lab-Hub`, pull the updates from upstream (class lab-hub) and push the updates back to your own GitHub repo. You will need the *personal access token* for this.
+
+```
+pi@ixe00:~$ cd Interactive-Lab-Hub
+pi@ixe00:~/Interactive-Lab-Hub $ git pull upstream Fall2025
+pi@ixe00:~/Interactive-Lab-Hub $ git add .
+pi@ixe00:~/Interactive-Lab-Hub $ git commit -m "get lab3 updates"
+pi@ixe00:~/Interactive-Lab-Hub $ git push
+```
+
+Option 2: On your own GitHub repo, [create a pull request](https://github.com/FAR-Lab/Developing-and-Designing-Interactive-Devices/blob/2022Fall/readings/Submitting%20Labs.md) to get updates from the class Interactive-Lab-Hub. After you have the latest updates online, go to your Pi, `cd` to your `Interactive-Lab-Hub` and use `git pull` to get updates from your own GitHub repo.
+
+## Part 1.
+### Setup
+
+Activate your virtual environment
+
+```
+pi@ixe00:~$ cd Interactive-Lab-Hub
+pi@ixe00:~/Interactive-Lab-Hub $ cd Lab\ 3
+pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ python3 -m venv .venv
+pi@ixe00:~/Interactive-Lab-Hub $ source .venv/bin/activate
+(.venv)pi@ixe00:~/Interactive-Lab-Hub $
+```
+
+Install the Python dependencies:
+```
+(.venv)pi@ixe00:~/Interactive-Lab-Hub $ pip install -r requirements.txt
+```
+
+Next, run the setup script to install additional text-to-speech dependencies:
+```
+(.venv)pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ ./setup.sh
+```
+
+### Text to Speech
+
+In this part of lab, we are going to start peeking into the world of audio on your Pi!
+
+We will be using the microphone and speaker on your webcamera. In the directory is a folder called `speech-scripts` containing several shell scripts. `cd` to the folder and list out all the files by `ls`:
+
+```
+pi@ixe00:~/speech-scripts $ ls
+Download festival_demo.sh GoogleTTS_demo.sh pico2text_demo.sh
+espeak_demo.sh flite_demo.sh lookdave.wav
+```
+
+You can run these shell files `.sh` by typing `./filename`, for example, typing `./espeak_demo.sh` and see what happens. Take some time to look at each script and see how it works. You can see a script by typing `cat filename`. For instance:
+
+```
+pi@ixe00:~/speech-scripts $ cat festival_demo.sh
+#from: https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)#Festival_Text_to_Speech
+```
+You can test the commands by running
+```
+echo "Just what do you think you're doing, Dave?" | festival --tts
+```
+
+Now, you might wonder what exactly a `.sh` file is.
+Typically, a `.sh` file is a shell script which you can execute in a terminal. The example files we offer here are for you to figure out the ways to play with audio on your Pi!
+
+You can also play audio files directly with `aplay filename`. Try typing `aplay lookdave.wav`.
+
+\*\***Write your own shell file to use your favorite of these TTS engines to have your Pi greet you by name.**\*\*
+(This shell file should be saved to your own repo for this lab.)
+See the file here (in the `speech-scripts` folder): https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/speech-scripts/my_greeting.sh
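+
+For comparison, the same greeting idea can be scripted from Python by shelling out to `espeak`. This is just a sketch (the name is a placeholder, and it assumes `espeak` is installed):
+
+```python
+import subprocess
+
+NAME = "your name here"  # placeholder
+
+# espeak speaks the text argument aloud; -s sets the speed in words per minute.
+subprocess.run(["espeak", "-s", "150", f"Hello {NAME}, welcome back to your Raspberry Pi!"], check=False)
+```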
+
+---
+Bonus:
+[Piper](https://github.com/rhasspy/piper) is another fast neural based text to speech package for raspberry pi which can be installed easily through python with:
+```
+pip install piper-tts
+```
+and used from the command line. Running the command below for the first time will download the model; subsequent runs will be faster.
+```
+echo 'Welcome to the world of speech synthesis!' | piper \
+ --model en_US-lessac-medium \
+ --output_file welcome.wav
+```
+Check the file that was created by running `aplay welcome.wav`. Many more languages are supported, and audio can be streamed directly to an audio output, rather than into a file, by:
+
+```
+echo 'This sentence is spoken first. This sentence is synthesized while the first sentence is spoken.' | \
+ piper --model en_US-lessac-medium --output-raw | \
+ aplay -r 22050 -f S16_LE -t raw -
+```
+
+### Speech to Text
+
+Next, set up speech to text. We are using a speech recognition engine, [Vosk](https://alphacephei.com/vosk/), developed by Alpha Cephei on top of the Kaldi toolkit. Vosk is amazing because it is an offline speech recognition engine; that is, all the processing for the speech recognition is happening onboard the Raspberry Pi.
+
+Make sure you're running in your virtual environment with the dependencies already installed:
+```
+source .venv/bin/activate
+```
+
+Test if vosk works by transcribing text:
+
+```
+vosk-transcriber -i recorded_mono.wav -o test.txt
+```
+
+You can use vosk with the microphone by running
+```
+python test_microphone.py -m en
+```
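+
+If you would rather call Vosk from your own Python code than use the provided scripts, a minimal sketch looks roughly like this (it assumes a downloaded model folder such as `vosk-model-small-en-us-0.15` and a 16 kHz mono WAV like `recorded_mono.wav`):
+
+```python
+import json
+import wave
+
+from vosk import Model, KaldiRecognizer
+
+# Placeholder paths -- point these at your model folder and recording.
+model = Model("vosk-model-small-en-us-0.15")
+wf = wave.open("recorded_mono.wav", "rb")
+rec = KaldiRecognizer(model, wf.getframerate())
+
+# Feed audio to the recognizer in chunks and collect the finished segments.
+results = []
+while True:
+    data = wf.readframes(4000)
+    if len(data) == 0:
+        break
+    if rec.AcceptWaveform(data):
+        results.append(json.loads(rec.Result())["text"])
+results.append(json.loads(rec.FinalResult())["text"])
+
+print(" ".join(results))
+```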
+
+---
+Bonus:
+[Whisper](https://openai.com/index/whisper/) is a neural network–based speech-to-text (STT) model developed and open-sourced by OpenAI. Compared to Vosk, Whisper generally achieves higher accuracy, particularly on noisy audio and diverse accents. It is available in multiple model sizes; for edge devices such as the Raspberry Pi 5 used in this class, the tiny.en model runs with reasonable latency even without a GPU.
+
+By contrast, Vosk is more lightweight and optimized for running efficiently on low-power devices like the Raspberry Pi. The choice between Whisper and Vosk depends on your scenario: if you need higher accuracy and can afford slightly more compute, Whisper is preferable; if your priority is minimal resource usage, Vosk may be a better fit.
+
+In this class, we provide two Whisper options: A quantized 8-bit faster-whisper model for speed, and the standard Whisper model. Try them out and compare the trade-offs.
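+
+For a rough idea of what the faster-whisper route looks like in your own code, a sketch along these lines should work (this is not the provided `faster_whisper_try.py`; it assumes the `faster-whisper` package is installed and uses the quantized `tiny.en` model):
+
+```python
+from faster_whisper import WhisperModel
+
+# int8 quantization keeps memory use and latency manageable on the Pi 5's CPU.
+model = WhisperModel("tiny.en", device="cpu", compute_type="int8")
+
+# Transcribe a local recording; segments are yielded lazily as they are decoded.
+segments, info = model.transcribe("recorded_mono.wav")
+for segment in segments:
+    print(f"[{segment.start:.1f}s -> {segment.end:.1f}s] {segment.text}")
+```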
+
+Make sure you're in the Lab 3 directory with your virtual environment activated:
+```
+cd ~/Interactive-Lab-Hub/Lab\ 3/speech-scripts
+source ../.venv/bin/activate
+```
+
+Then test the Whisper models:
+```
+python whisper_try.py
+```
+and
+
+```
+python faster_whisper_try.py
+```
+\*\***Write your own shell file that verbally asks for a numerical based input (such as a phone number, zipcode, number of pets, etc) and records the answer the respondent provides.**\*\*
+See the file here (in the `speech-scripts` folder): https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/speech-scripts/numerical_input.sh
+
+### 🤖 NEW: AI-Powered Conversations with Ollama
+
+Want to add intelligent conversation capabilities to your voice projects? **Ollama** lets you run AI models locally on your Raspberry Pi for sophisticated dialogue without requiring internet connectivity!
+
+#### Quick Start with Ollama
+
+**Installation** (takes ~5 minutes):
+```bash
+# Install Ollama
+curl -fsSL https://ollama.com/install.sh | sh
+
+# Download recommended model for Pi 5
+ollama pull phi3:mini
+
+# Install system dependencies for audio (required for pyaudio)
+sudo apt-get update
+sudo apt-get install -y portaudio19-dev python3-dev
+
+# Create separate virtual environment for Ollama (due to pyaudio conflicts)
+cd ollama/
+python3 -m venv ollama_venv
+source ollama_venv/bin/activate
+
+# Install Python dependencies in separate environment
+pip install -r ollama_requirements.txt
+```
+#### Ready-to-Use Scripts
+
+We've created three Ollama integration scripts for different use cases:
+
+**1. Basic Demo** - Learn how Ollama works:
+```bash
+python3 ollama_demo.py
+```
+
+**2. Voice Assistant** - Full speech-to-text + AI + text-to-speech:
+```bash
+python3 ollama_voice_assistant.py
+```
+
+**3. Web Interface** - Beautiful web-based chat with voice options:
+```bash
+python3 ollama_web_app.py
+# Then open: http://localhost:5000
+```
+
+#### Integration in Your Projects
+
+Simple example to add AI to any project:
+```python
+import requests
+
+def ask_ai(question):
+ response = requests.post(
+ "http://localhost:11434/api/generate",
+ json={"model": "phi3:mini", "prompt": question, "stream": False}
+ )
+ return response.json().get('response', 'No response')
+
+# Use it anywhere!
+answer = ask_ai("How should I greet users?")
+```
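+
+If you would like the reply to appear word by word instead of arriving all at once (handy when the Pi takes a while to generate), the same endpoint also supports streaming. A minimal sketch of that pattern, where each line of the HTTP response is a small JSON object carrying a piece of the answer:
+
+```python
+import json
+import requests
+
+def ask_ai_streaming(question):
+    # With "stream": True, Ollama sends one JSON object per line until "done" is true.
+    with requests.post(
+        "http://localhost:11434/api/generate",
+        json={"model": "phi3:mini", "prompt": question, "stream": True},
+        stream=True,
+    ) as response:
+        for line in response.iter_lines():
+            if line:
+                chunk = json.loads(line)
+                print(chunk.get("response", ""), end="", flush=True)
+    print()
+
+ask_ai_streaming("Give me a one-sentence greeting for a lab demo.")
+```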
+
+**📖 Complete Setup Guide**: See `OLLAMA_SETUP.md` for detailed instructions, troubleshooting, and advanced usage!
+
+\*\***Try creating a simple voice interaction that combines speech recognition, Ollama processing, and text-to-speech output. Document what you built and how users responded to it.**\*\*
+See the script here (`final_voice_assistant.py` in the `ollama` folder): https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/ollama/final_voice_assistant.py
+See documentation of the script and user responses here: https://docs.google.com/document/d/1MC8Soh6y-xnqsH4-R49oLbx3axFsuhnrAuuwzcrkruw/edit?tab=t.0
+
+
+### Serving Pages
+
+In Lab 1, we served a webpage with flask. In this lab, you may find it useful to serve a webpage for the controller on a remote device. Here is a simple example of a webserver.
+
+```
+pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ python server.py
+ * Serving Flask app "server" (lazy loading)
+ * Environment: production
+ WARNING: This is a development server. Do not use it in a production deployment.
+ Use a production WSGI server instead.
+ * Debug mode: on
+ * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
+ * Restarting with stat
+ * Debugger is active!
+ * Debugger PIN: 162-573-883
+```
+```
+From a remote browser on the same network, check to make sure your webserver is working by going to `http://<your-pi-ip-address>:5000/`.
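+
+If you are curious what such a server looks like, a minimal Flask app in the same spirit (a sketch, not the provided `server.py`) is only a few lines:
+
+```python
+from flask import Flask
+
+app = Flask(__name__)
+
+@app.route("/")
+def index():
+    # Replace this with whatever controls your remote device should expose.
+    return "<h1>Hello from the Pi!</h1>"
+
+if __name__ == "__main__":
+    # host="0.0.0.0" makes the page reachable from other devices on the network.
+    app.run(host="0.0.0.0", port=5000, debug=True)
+```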
+
+## Hardware Set-Up
+
+For this demo, you will need:
+* your Raspberry Pi,
+* a Qwiic/Stemma Cable,
+* the display (we are just using it for the Qwiic/StemmaQT port. Feel free to use the display in your projects),
+* your accelerometer, and
+* your web camera
+
+
+
+
+
+Plug the display in and connect the accelerometer to the port underneath with your Qwiic connector cable. Plug the web camera into the raspberry pi.
+
+## Software Setup
+
+SSH into your Raspberry Pi as we've done previously:
+
+`ssh pi@yourHostname.local`
+
+Ensure audio is playing through the aux connector by typing
+
+`sudo raspi-config`
+
+on `system options` hit enter. Go down to `s2 Audio` and hit enter. Select `1 USB Audio` and hit enter. Then navigate to `
+
+
+
+
+
+
+
+
diff --git a/Lab 3/lab1note.md b/Lab 3/lab1note.md
new file mode 100644
index 0000000000..025d193b8e
--- /dev/null
+++ b/Lab 3/lab1note.md
@@ -0,0 +1,15 @@
+# Lab 1 Grading Notes
+
+Here are some notes I took while grading Lab 1:
+
+1. At the start of your README file, please include your collaborator’s name along with their **netID**. Also, list both your registered name on Canvas and your preferred name.
+2. Think of your submission as the documentation of what you’ve done. The golden standard is: if you show the README to someone who has no idea what your project is, they should be able to replicate it by following your instructions.
+3. Format matters. Check the instructions [here](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax).
+4. Peer evaluation and feedback matter. You should document their comments, and write down their words and netID. You’re also welcome to interview and present to people outside the class.
+5. Don’t put raw videos directly in your Hub—it takes up too much storage. If you clone the Hub, the video will be downloaded to your Pi5, which doesn’t have much space for large files you don’t actually need. The same goes for large images. Instead, upload them to cloud storage such as Google Drive or [Cornell Box](https://it.cornell.edu/box).
+6. Feel free to remove the comments and instructions we provided for the earlier part of the lab. You don’t need to leave them in your file unless you find them necessary. Again, think of the task as documenting your **own progress**.
+7. Here are some good examples of Lab 1. Check how they organize their documentation and how the concept evolves into the actual working prototype:
+ - [Thomas Knoepffler, Rajvi Ranjit Patil, Om Kamath, Laura Moreno](https://github.com/thomknoe/INFO-5345/tree/Fall2025/Lab%201)
+ - [Wenzhuo Ma, Yoyo Wang](https://github.com/mawenzhuo2022/Interactive-Lab-Hub/tree/Fall2025/Lab%201)
+ - [Jully Li, Sirui Wang, Amy Chen](https://github.com/ac3295-lab/Interactive-Lab-Hub/tree/Fall2025/Lab%201)
+ - [Weicong Hong (wh528), Feier Su (fs495)](https://github.com/wendyyh/Interactive-Lab-Hub/blob/Fall2025/Lab%201/README.md)
diff --git a/Lab 3/ollama/OLLAMA_SETUP.md b/Lab 3/ollama/OLLAMA_SETUP.md
new file mode 100644
index 0000000000..cb5dd5bc01
--- /dev/null
+++ b/Lab 3/ollama/OLLAMA_SETUP.md
@@ -0,0 +1,187 @@
+# Ollama Setup Instructions for Lab 3
+*Interactive Device Design - Voice and Speech Prototypes*
+
+## What is Ollama?
+
+Ollama is a tool that lets you run large language models (like ChatGPT, but smaller) locally on your Raspberry Pi. This means your voice assistant can have intelligent conversations without needing internet connectivity for AI processing!
+
+## 🚀 Quick Setup
+
+### Step 1: Install Ollama
+
+Run this command in your terminal:
+
+```bash
+curl -fsSL https://ollama.com/install.sh | sh
+```
+
+### Step 2: Download a Model
+
+We recommend **phi3:mini** for Raspberry Pi 5 - it's fast, lightweight, and smart enough for prototyping:
+
+```bash
+ollama pull phi3:mini
+```
+
+*This will download about 2.2GB, so make sure you have good internet and some patience!*
+
+### Step 3: Test Your Installation
+
+```bash
+ollama run phi3:mini "Hello, introduce yourself!"
+```
+
+You should see a response from the AI model.
+
+### Step 4: Install Python Dependencies
+
+```bash
+pip install requests speechrecognition pyaudio flask flask-socketio
+```
+
+## 🎯 Ready-to-Use Scripts
+
+We've created several scripts for different use cases:
+
+### 1. Simple Demo (`ollama_demo.py`)
+```bash
+python3 ollama_demo.py
+```
+- Basic text chat with Ollama
+- Text-to-speech responses
+- Perfect for understanding how Ollama works
+
+### 2. Voice Assistant (`ollama_voice_assistant.py`)
+```bash
+python3 ollama_voice_assistant.py
+```
+- Full voice interaction (speech-to-text + Ollama + text-to-speech)
+- Natural conversation flow
+- Say "hello" to start, "goodbye" to exit
+
+### 3. Web Interface (`ollama_web_app.py`)
+```bash
+python3 ollama_web_app.py
+```
+- Beautiful web interface at `http://localhost:5000`
+- Chat interface with voice options
+- Great for prototyping web-based voice interactions
+
+## 🔧 Troubleshooting
+
+### "Ollama not responding"
+Make sure Ollama is running:
+```bash
+ollama serve
+```
+Then try your script again.
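+
+You can also check connectivity from Python before running a larger script. A small sketch using the same `/api/tags` endpoint:
+
+```python
+import requests
+
+# /api/tags lists the models the local Ollama server knows about.
+try:
+    r = requests.get("http://localhost:11434/api/tags", timeout=5)
+    models = [m["name"] for m in r.json().get("models", [])]
+    print("Ollama is up. Models:", models)
+except requests.exceptions.RequestException as e:
+    print("Ollama is not reachable:", e)
+```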
+
+### "Model not found"
+List available models:
+```bash
+ollama list
+```
+If phi3:mini isn't there, pull it again:
+```bash
+ollama pull phi3:mini
+```
+
+### Speech Recognition Issues
+Make sure your microphone is working:
+```bash
+python3 speech-scripts/test_microphone.py
+```
+
+### Audio Output Issues
+Test with espeak:
+```bash
+espeak "Hello from your Pi"
+```
+
+## 📚 For Your Projects
+
+### Quick Integration Template
+
+```python
+import requests
+
+def ask_ollama(question):
+ response = requests.post(
+ "http://localhost:11434/api/generate",
+ json={
+ "model": "phi3:mini",
+ "prompt": question,
+ "stream": False
+ }
+ )
+ return response.json().get('response', 'Sorry, no response')
+
+# Use it in your project
+answer = ask_ollama("What's the weather like?")
+print(answer)
+```
+
+### Make Your Assistant Specialized
+
+Add a system prompt to make your assistant behave differently:
+
+```python
+def ask_specialized_ollama(question, personality):
+ response = requests.post(
+ "http://localhost:11434/api/generate",
+ json={
+ "model": "phi3:mini",
+ "prompt": question,
+ "system": personality, # This changes behavior!
+ "stream": False
+ }
+ )
+ return response.json().get('response', 'Sorry, no response')
+
+# Examples:
+chef_response = ask_specialized_ollama(
+ "What should I cook?",
+ "You are a helpful chef. Give short, practical cooking advice."
+)
+
+therapist_response = ask_specialized_ollama(
+ "I'm feeling stressed",
+ "You are a supportive counselor. Be empathetic and encouraging."
+)
+```
+
+## 🎨 Creative Ideas for Your Project
+
+1. **Smart Home Assistant**: "Turn on the lights" → Ollama processes → controls GPIO (see the sketch after this list)
+2. **Language Tutor**: Practice conversations in different languages
+3. **Storytelling Device**: Interactive storytelling with AI-generated plots
+4. **Cooking Assistant**: Voice-controlled recipe helper
+5. **Study Buddy**: AI tutor that adapts to your learning style
+6. **Emotion Support**: An empathetic companion for daily check-ins
+7. **Game Master**: AI-powered text adventure games
+8. **Creative Writing Partner**: Collaborative story creation
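+
+For idea 1, the glue between Ollama and the hardware can stay very small. A hedged sketch (it assumes the `gpiozero` library is available and uses an LED on GPIO 17 as a stand-in for the "lights"; the prompt wording is just one way to do it):
+
+```python
+import requests
+from gpiozero import LED
+
+light = LED(17)  # placeholder pin for whatever your device actually switches
+
+def ask_ollama(question):
+    response = requests.post(
+        "http://localhost:11434/api/generate",
+        json={"model": "phi3:mini", "prompt": question, "stream": False}
+    )
+    return response.json().get('response', '')
+
+# Ask the model to reduce the spoken command to a single word we can act on.
+command = "Turn on the lights"
+decision = ask_ollama(
+    f"Answer with exactly one word, ON or OFF: should the lights be on if the user says '{command}'?"
+)
+
+if decision.strip().upper().startswith("ON"):
+    light.on()
+else:
+    light.off()
+```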
+
+## 📖 Additional Resources
+
+- [Ollama Documentation](https://docs.ollama.com)
+- [Available Models](https://ollama.com/library) (try different ones!)
+- [Ollama API Reference](https://docs.ollama.com/api)
+
+## 🆘 Getting Help
+
+1. Check the troubleshooting section above
+2. Ask in the class Slack channel
+3. Use WendyTA (mention "@Ollama" in your question)
+4. Office hours with TAs
+
+## 🏆 Pro Tips
+
+1. **Model Size vs Speed**: Smaller models (like phi3:mini) are faster but less capable
+2. **Internet Independence**: Once downloaded, models work offline!
+3. **Experiment**: Try different system prompts to change personality
+4. **Combine with Sensors**: Use Pi sensors + Ollama for context-aware responses
+5. **Memory**: Each conversation is independent - add conversation history if needed (see the sketch below)
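+
+A simple way to add that history is to keep a running transcript and prepend it to each new prompt. A minimal sketch (the transcript format here is just one possible convention):
+
+```python
+import requests
+
+history = []  # (speaker, text) pairs from earlier turns
+
+def ask_with_history(question):
+    # Fold previous turns into the prompt so the model can refer back to them.
+    transcript = "\n".join(f"{speaker}: {text}" for speaker, text in history)
+    prompt = f"{transcript}\nUser: {question}\nAssistant:" if transcript else question
+
+    response = requests.post(
+        "http://localhost:11434/api/generate",
+        json={"model": "phi3:mini", "prompt": prompt, "stream": False}
+    )
+    answer = response.json().get('response', 'Sorry, no response')
+
+    history.append(("User", question))
+    history.append(("Assistant", answer))
+    return answer
+
+print(ask_with_history("My name is Sam."))
+print(ask_with_history("What is my name?"))
+```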
+
+---
+
+*Happy prototyping! Remember: the goal is to rapidly iterate and test ideas with real users.*
\ No newline at end of file
diff --git a/Lab 3/ollama/final_voice_assistant.py b/Lab 3/ollama/final_voice_assistant.py
new file mode 100644
index 0000000000..16b24406a3
--- /dev/null
+++ b/Lab 3/ollama/final_voice_assistant.py
@@ -0,0 +1,93 @@
+# ollama_voice_assistant.py
+
+import speech_recognition as sr
+import requests
+import os
+import time
+
+# --- Configuration ---
+MICROPHONE_INDEX = 2
+MODEL_NAME = "phi3:mini"
+OLLAMA_URL = "http://localhost:11434/api/generate"
+
+ENERGY_THRESHOLD = 150
+
+def speak(text):
+ """Uses the espeak command line tool for Text-to-Speech with better parameters."""
+
+ text = text.replace("'", "'\\''")
+ # -v en+f3: Female English voice | -s 150: Speed 150 WPM | -k 15: Pitch/Inflection
+ print(f"AI Speaking: {text}")
+ os.system(f"espeak -v en+f3 -s 150 -k 15 '{text}' 2>/dev/null")
+
+def transcribe_speech():
+ """Listens for user input and converts it to text."""
+ r = sr.Recognizer()
+ try:
+ with sr.Microphone(device_index=MICROPHONE_INDEX) as source:
+ r.adjust_for_ambient_noise(source)
+ r.energy_threshold = ENERGY_THRESHOLD
+
+ print("\nListening... Speak now.")
+ speak("Ready. Ask me anything.")
+
+
+ time.sleep(0.5)
+
+ audio = r.listen(source, timeout=8, phrase_time_limit=15)
+
+ except Exception as e:
+ print(f"Microphone error: {e}. Check MICROPHONE_INDEX ({MICROPHONE_INDEX}).")
+ speak("I am having trouble accessing the microphone.")
+ return None
+
+ try:
+ print("Transcribing via Google Speech Recognition...")
+ text = r.recognize_google(audio)
+ print(f"User Said: {text}")
+ return text
+ except sr.UnknownValueError:
+ print("Could not understand audio.")
+ speak("I didn't catch that. Could you repeat it?")
+ return None
+ except sr.RequestError as e:
+ print(f"Speech recognition service error: {e}")
+ speak("My transcription service is currently unavailable.")
+ return None
+
+def ask_ai(question):
+ """Sends the question to the local Ollama model."""
+ print("Sending request to Ollama...")
+ try:
+ # Long timeout (120 seconds) for the RPi's slow processing
+ response = requests.post(
+ OLLAMA_URL,
+ json={"model": MODEL_NAME, "prompt": question, "stream": False},
+ timeout=120
+ )
+ response.raise_for_status()
+ return response.json().get('response', 'No response received from the model.')
+ except requests.exceptions.RequestException as e:
+ print(f"Error communicating with Ollama: {e}")
+ return "I seem to be having trouble connecting to the AI model on port 11434."
+
+def main_assistant_loop():
+ """Main loop for the voice assistant."""
+ speak("Voice assistant is active. Say 'stop' to quit.")
+ while True:
+ user_text = transcribe_speech()
+
+ if user_text:
+ if "stop" in user_text.lower() or "exit" in user_text.lower() or "quit" in user_text.lower():
+ speak("Goodbye.")
+ print("Exiting assistant.")
+ break
+
+ ai_response = ask_ai(user_text)
+
+ if ai_response:
+ print(f"AI Response: {ai_response}")
+ speak(ai_response)
+
+if __name__ == "__main__":
+ main_assistant_loop()
diff --git a/Lab 3/ollama/ollama_demo.py b/Lab 3/ollama/ollama_demo.py
new file mode 100644
index 0000000000..11ff65fbc6
--- /dev/null
+++ b/Lab 3/ollama/ollama_demo.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Simple Ollama Demo for Lab 3
+Basic example of integrating Ollama with voice I/O
+
+This script demonstrates:
+1. Text input to Ollama
+2. Voice input to Ollama
+3. Voice output from Ollama
+"""
+
+import requests
+import json
+import subprocess
+import sys
+import os
+
+# Set UTF-8 encoding for output
+if sys.stdout.encoding != 'UTF-8':
+ import codecs
+ sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
+if sys.stderr.encoding != 'UTF-8':
+ import codecs
+ sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
+
+def speak_text(text):
+ """Simple text-to-speech using espeak"""
+ # Clean text to avoid encoding issues
+ clean_text = text.encode('ascii', 'ignore').decode('ascii')
+ print(f"Assistant: {clean_text}")
+ subprocess.run(['espeak', clean_text], check=False)  # pass args as a list; shell=True with a list would drop the text
+
+def query_ollama(prompt, model="phi3:mini"):
+ """Send a text prompt to Ollama and get response"""
+ try:
+ response = requests.post(
+ "http://localhost:11434/api/generate",
+ json={
+ "model": model,
+ "prompt": prompt,
+ "stream": False
+ },
+ timeout=30
+ )
+
+ if response.status_code == 200:
+ return response.json().get('response', 'No response')
+ else:
+ return f"Error: {response.status_code}"
+
+ except Exception as e:
+ return f"Error: {e}"
+
+def text_chat_demo():
+ """Simple text-based chat with Ollama"""
+ print("\n=== TEXT CHAT DEMO ===")
+ print("Type 'quit' to exit")
+
+ while True:
+ user_input = input("\nYou: ")
+ if user_input.lower() in ['quit', 'exit']:
+ break
+
+ print("Thinking...")
+ response = query_ollama(user_input)
+ print(f"Ollama: {response}")
+
+def voice_response_demo():
+ """Demo: Text input, voice output"""
+ print("\n=== VOICE RESPONSE DEMO ===")
+ print("Type your message, Ollama will respond with voice")
+ print("Type 'quit' to exit")
+
+ while True:
+ user_input = input("\nYour message: ")
+ if user_input.lower() in ['quit', 'exit']:
+ break
+
+ print("Thinking...")
+ response = query_ollama(user_input)
+ speak_text(response)
+
+def check_ollama():
+ """Check if Ollama is running and model is available"""
+ try:
+ response = requests.get("http://localhost:11434/api/tags")
+ if response.status_code == 200:
+ models = response.json().get('models', [])
+ model_names = [m['name'] for m in models]
+ print(f"Ollama is running. Available models: {model_names}")
+ return True
+ else:
+ print("Ollama is not responding")
+ return False
+ except Exception as e:
+ print(f"Cannot connect to Ollama: {e}")
+ print("Make sure Ollama is running with: ollama serve")
+ return False
+
+def main():
+ """Main demo menu"""
+ print("Ollama Lab 3 Demo")
+ print("=" * 30)
+
+ # Check Ollama connection
+ if not check_ollama():
+ return
+
+ while True:
+ print("\nChoose a demo:")
+ print("1. Text Chat (type to Ollama)")
+ print("2. Voice Response (Ollama speaks responses)")
+ print("3. Test Ollama (simple query)")
+ print("4. Exit")
+
+ choice = input("\nEnter choice (1-4): ")
+
+ if choice == "1":
+ text_chat_demo()
+ elif choice == "2":
+ voice_response_demo()
+ elif choice == "3":
+ response = query_ollama("Say hello and introduce yourself briefly")
+ print(f"Ollama: {response}")
+ elif choice == "4":
+ print("Goodbye!")
+ break
+ else:
+ print("Invalid choice. Please try again.")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/Lab 3/ollama/ollama_requirements.txt b/Lab 3/ollama/ollama_requirements.txt
new file mode 100644
index 0000000000..39a19ff33a
--- /dev/null
+++ b/Lab 3/ollama/ollama_requirements.txt
@@ -0,0 +1,24 @@
+# Requirements for Ollama Integration in Lab 3
+
+# Core dependencies for Ollama integration
+requests>=2.31.0
+flask>=2.3.0
+flask-socketio>=5.3.0
+
+# Voice processing dependencies
+SpeechRecognition>=3.10.0
+pyaudio>=0.2.11
+pyttsx3>=2.90
+
+# Alternative TTS (system-level, installed via apt)
+# espeak (install with: sudo apt-get install espeak)
+
+# Optional: Enhanced audio processing
+# sounddevice>=0.4.6
+# numpy>=1.24.0
+
+# For web interface
+eventlet>=0.33.3
+
+# Note: Ollama itself is installed separately using the install script:
+# curl -fsSL https://ollama.com/install.sh | sh
\ No newline at end of file
diff --git a/Lab 3/ollama/ollama_voice_assistant.py b/Lab 3/ollama/ollama_voice_assistant.py
new file mode 100644
index 0000000000..3e5dfc6afd
--- /dev/null
+++ b/Lab 3/ollama/ollama_voice_assistant.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Ollama Voice Assistant for Lab 3
+Interactive voice assistant using speech recognition, Ollama AI, and text-to-speech
+
+Dependencies:
+- ollama (API client)
+- speech_recognition
+- pyaudio
+- pyttsx3 or espeak
+"""
+
+import speech_recognition as sr
+import subprocess
+import requests
+import json
+import time
+import sys
+import threading
+from queue import Queue
+
+# Set UTF-8 encoding for output
+if sys.stdout.encoding != 'UTF-8':
+ import codecs
+ sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
+if sys.stderr.encoding != 'UTF-8':
+ import codecs
+ sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
+
+try:
+ import pyttsx3
+ TTS_ENGINE = 'pyttsx3'
+except ImportError:
+ TTS_ENGINE = 'espeak'
+ print("pyttsx3 not available, using espeak for TTS")
+
+class OllamaVoiceAssistant:
+ def __init__(self, model_name="phi3:mini", ollama_url="http://localhost:11434"):
+ self.model_name = model_name
+ self.ollama_url = ollama_url
+ self.recognizer = sr.Recognizer()
+ self.microphone = sr.Microphone()
+
+ # Initialize TTS
+ if TTS_ENGINE == 'pyttsx3':
+ self.tts_engine = pyttsx3.init()
+ self.tts_engine.setProperty('rate', 150) # Speed of speech
+
+ # Test Ollama connection
+ self.test_ollama_connection()
+
+ # Adjust for ambient noise
+ print("Adjusting for ambient noise... Please wait.")
+ with self.microphone as source:
+ self.recognizer.adjust_for_ambient_noise(source)
+ print("Ready for conversation!")
+
+ def test_ollama_connection(self):
+ """Test if Ollama is running and the model is available"""
+ try:
+ response = requests.get(f"{self.ollama_url}/api/tags")
+ if response.status_code == 200:
+ models = response.json().get('models', [])
+ model_names = [m['name'] for m in models]
+ if self.model_name in model_names:
+ print(f"Ollama is running with {self.model_name} model")
+ else:
+ print(f"Model {self.model_name} not found. Available models: {model_names}")
+ if model_names:
+ self.model_name = model_names[0]
+ print(f"Using {self.model_name} instead")
+ else:
+ raise Exception("Ollama API not responding")
+ except Exception as e:
+ print(f"Error connecting to Ollama: {e}")
+ print("Make sure Ollama is running: 'ollama serve'")
+ sys.exit(1)
+
+ def speak(self, text):
+ """Convert text to speech"""
+ # Clean text to avoid encoding issues
+ clean_text = text.encode('ascii', 'ignore').decode('ascii')
+ print(f"Assistant: {clean_text}")
+
+ if TTS_ENGINE == 'pyttsx3':
+ self.tts_engine.say(clean_text)
+ self.tts_engine.runAndWait()
+ else:
+ # Use espeak as fallback
+ subprocess.run(['espeak', clean_text], check=False)
+
+ def listen(self):
+ """Listen for speech and convert to text"""
+ try:
+ print("Listening...")
+ with self.microphone as source:
+ # Listen for audio with timeout
+ audio = self.recognizer.listen(source, timeout=5, phrase_time_limit=10)
+
+ print("Recognizing...")
+ # Use Google Speech Recognition (free)
+ text = self.recognizer.recognize_google(audio)
+ print(f"You said: {text}")
+ return text.lower()
+
+ except sr.WaitTimeoutError:
+ print("No speech detected, timing out...")
+ return None
+ except sr.UnknownValueError:
+ print("Could not understand audio")
+ return None
+ except sr.RequestError as e:
+ print(f"Error with speech recognition service: {e}")
+ return None
+
+ def query_ollama(self, prompt, system_prompt=None):
+ """Send a query to Ollama and get response"""
+ try:
+ data = {
+ "model": self.model_name,
+ "prompt": prompt,
+ "stream": False
+ }
+
+ if system_prompt:
+ data["system"] = system_prompt
+
+ response = requests.post(
+ f"{self.ollama_url}/api/generate",
+ json=data,
+ timeout=30
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+ return result.get('response', 'Sorry, I could not generate a response.')
+ else:
+ return f"Error: Ollama API returned status {response.status_code}"
+
+ except requests.exceptions.Timeout:
+ return "Sorry, the response took too long. Please try again."
+ except Exception as e:
+ return f"Error communicating with Ollama: {e}"
+
+ def run_conversation(self):
+ """Main conversation loop"""
+ print("\nOllama Voice Assistant Started!")
+ print("Say 'hello' to start, 'exit' or 'quit' to stop")
+ print("=" * 50)
+
+ # System prompt to make the assistant more conversational
+ system_prompt = """You are a helpful voice assistant. Keep your responses concise and conversational,
+ typically 1-2 sentences. Be friendly and engaging. You are running on a Raspberry Pi as part of an
+ interactive device design lab."""
+
+ self.speak("Hello! I'm your Ollama voice assistant. How can I help you today?")
+
+ while True:
+ try:
+ # Listen for user input
+ user_input = self.listen()
+
+ if user_input is None:
+ continue
+
+ # Check for exit commands
+ if any(word in user_input for word in ['exit', 'quit', 'bye', 'goodbye']):
+ self.speak("Goodbye! Have a great day!")
+ break
+
+ # Check for greeting
+ if any(word in user_input for word in ['hello', 'hi', 'hey']):
+ self.speak("Hello! What would you like to talk about?")
+ continue
+
+ # Send to Ollama for processing
+ print("Thinking...")
+ response = self.query_ollama(user_input, system_prompt)
+
+ # Speak the response
+ self.speak(response)
+
+ except KeyboardInterrupt:
+ print("\nConversation interrupted by user")
+ self.speak("Goodbye!")
+ break
+ except Exception as e:
+ print(f"Unexpected error: {e}")
+ self.speak("Sorry, I encountered an error. Let's try again.")
+
+def main():
+ """Main function to run the voice assistant"""
+ print("Starting Ollama Voice Assistant...")
+
+ # Check if required dependencies are available
+ try:
+ import speech_recognition
+ import requests
+ except ImportError as e:
+ print(f"Missing dependency: {e}")
+ print("Please install with: pip install speechrecognition requests pyaudio")
+ return
+
+ # Create and run the assistant
+ try:
+ assistant = OllamaVoiceAssistant()
+ assistant.run_conversation()
+ except Exception as e:
+ print(f"Failed to start assistant: {e}")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/Lab 3/ollama/ollama_web_app.py b/Lab 3/ollama/ollama_web_app.py
new file mode 100644
index 0000000000..a5b896f6c5
--- /dev/null
+++ b/Lab 3/ollama/ollama_web_app.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+Ollama Flask Web Interface for Lab 3
+Web-based voice assistant using Ollama
+
+This extends the existing Flask app in demo/app.py to include Ollama integration
+"""
+
+import eventlet
+eventlet.monkey_patch()
+
+from flask import Flask, Response, render_template, request, jsonify
+from flask_socketio import SocketIO, send, emit
+import requests
+import json
+import subprocess
+import os
+
+app = Flask(__name__)
+socketio = SocketIO(app, cors_allowed_origins="*")
+
+# Ollama configuration
+OLLAMA_URL = "http://localhost:11434"
+DEFAULT_MODEL = "phi3:mini"
+
+def query_ollama(prompt, model=DEFAULT_MODEL):
+ """Query Ollama and return response"""
+ try:
+ response = requests.post(
+ f"{OLLAMA_URL}/api/generate",
+ json={
+ "model": model,
+ "prompt": prompt,
+ "stream": False
+ },
+ timeout=30
+ )
+
+ if response.status_code == 200:
+ return response.json().get('response', 'No response generated')
+ else:
+ return f"Error: Ollama returned status {response.status_code}"
+
+ except requests.exceptions.Timeout:
+ return "Sorry, the response took too long. Please try again."
+ except Exception as e:
+ return f"Error: {str(e)}"
+
+def speak_text(text):
+ """Text-to-speech using espeak"""
+ try:
+        subprocess.run(['espeak', text], check=False)  # speak the text aloud via espeak
+ except Exception as e:
+ print(f"TTS Error: {e}")
+
+@app.route('/')
+def index():
+ """Main web interface"""
+ return render_template('ollama_chat.html')
+
+@app.route('/api/chat', methods=['POST'])
+def chat_api():
+ """REST API endpoint for chat"""
+ data = request.get_json()
+ user_message = data.get('message', '')
+
+ if not user_message:
+ return jsonify({'error': 'No message provided'}), 400
+
+ # Query Ollama
+ response = query_ollama(user_message)
+
+ return jsonify({
+ 'user_message': user_message,
+ 'ai_response': response
+ })
+
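+# Example request against the /api/chat route above (a sketch; assumes the app is
+# running locally on the default port 5000):
+#   curl -X POST http://localhost:5000/api/chat \
+#        -H 'Content-Type: application/json' \
+#        -d '{"message": "hello"}'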
+@socketio.on('chat_message')
+def handle_chat_message(data):
+ """Handle chat message via WebSocket"""
+ user_message = data.get('message', '')
+
+ if user_message:
+ # Query Ollama
+ ai_response = query_ollama(user_message)
+
+ # Send response back to client
+ emit('ai_response', {
+ 'user_message': user_message,
+ 'ai_response': ai_response
+ })
+
+@socketio.on('speak_request')
+def handle_speak_request(data):
+ """Handle text-to-speech request"""
+ text = data.get('text', '')
+ if text:
+ speak_text(text)
+ emit('speak_complete', {'text': text})
+
+@socketio.on('voice_chat')
+def handle_voice_chat(data):
+ """Handle voice chat request (text in, voice out)"""
+ user_message = data.get('message', '')
+
+ if user_message:
+ # Query Ollama
+ ai_response = query_ollama(user_message)
+
+ # Speak the response
+ speak_text(ai_response)
+
+ # Send response to client
+ emit('voice_response', {
+ 'user_message': user_message,
+ 'ai_response': ai_response
+ })
+
+@app.route('/status')
+def status():
+ """Check Ollama status"""
+ try:
+ response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
+ if response.status_code == 200:
+ models = response.json().get('models', [])
+ return jsonify({
+ 'status': 'connected',
+ 'models': [m['name'] for m in models],
+ 'current_model': DEFAULT_MODEL
+ })
+ else:
+ return jsonify({'status': 'error', 'message': 'Ollama not responding'}), 500
+ except Exception as e:
+ return jsonify({'status': 'error', 'message': str(e)}), 500
+
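+# Quick connectivity check for the /status route above (assumes port 5000):
+#   curl http://localhost:5000/status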
+if __name__ == '__main__':
+ print("🚀 Starting Ollama Flask Web Interface...")
+ print("Open your browser to http://localhost:5000")
+ socketio.run(app, host='0.0.0.0', port=5000, debug=True)
\ No newline at end of file
diff --git a/Lab 3/ollama/templates/ollama_chat.html b/Lab 3/ollama/templates/ollama_chat.html
new file mode 100644
index 0000000000..c7e29d38e1
--- /dev/null
+++ b/Lab 3/ollama/templates/ollama_chat.html
@@ -0,0 +1,284 @@
+<!-- ollama_chat.html: browser chat interface page titled "🤖 Ollama Voice Assistant" -->
+#### Option 2: Command Line Method
+Alternatively, you can pair the X1 speaker from the terminal:
+
+1. **Start Bluetooth control:**
+ ```bash
+ sudo bluetoothctl
+ ```
+
+2. **Enable Bluetooth and make discoverable:**
+ ```bash
+ power on
+ agent on
+ discoverable on
+ scan on
+ ```
+
+3. **Find your X1 speaker (filter devices by name):**
+ ```bash
+ devices
+ ```
+ Then look for a line containing "X1", or use this command to filter:
+ ```bash
+ exit
+ echo 'devices' | bluetoothctl | grep -i "X1"
+ ```
+ This will show something like: `Device XX:XX:XX:XX:XX:XX X1`
+
+4. **Pair with your X1 (replace XX:XX:XX:XX:XX:XX with your device's MAC address):**
+ ```bash
+ bluetoothctl
+ pair XX:XX:XX:XX:XX:XX
+ trust XX:XX:XX:XX:XX:XX
+ connect XX:XX:XX:XX:XX:XX
+ exit
+ ```
+
+You should hear a "beep" from the speaker when successfully connected.
+
+### Set up the Web camera
+
+1. In your VNC (or Raspberry Pi Connect) session, open a terminal and install `pavucontrol`:
+
+ ```
+ $ sudo apt install pavucontrol
+ ```
+2. Launch `pavucontrol` from the terminal. A GUI window should open:
+
+ ```
+ $ pavucontrol
+ ```
+
+3. Go to the Configuration tab and make sure the profile for the C270 webcam is set to Mono Input.
+4. Go to the Input Devices tab; the level bar should move as you speak, which confirms the microphone is set up correctly (an optional terminal check is sketched below).
+
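+You can also sanity-check the microphone from the terminal by recording a short clip and playing it back (a minimal sketch; `hw:2,0` matches the device used elsewhere in this lab, but your card number may differ, so run `arecord -l` to list capture devices):
+
+```bash
+arecord -D hw:2,0 -f S16_LE -r 16000 -d 3 -t wav mic_test.wav
+aplay mic_test.wav
+```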
diff --git a/Lab 3/requirements.txt b/Lab 3/requirements.txt
new file mode 100644
index 0000000000..83145d85ed
--- /dev/null
+++ b/Lab 3/requirements.txt
@@ -0,0 +1,102 @@
+addict==2.4.0
+annotated-types==0.7.0
+attrs==25.3.0
+av==15.1.0
+babel==2.17.0
+blis==1.3.0
+catalogue==2.0.10
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.3.0
+cloudpathlib==0.22.0
+coloredlogs==15.0.1
+confection==0.1.5
+csvw==3.6.0
+ctranslate2==4.6.0
+curated-tokenizers==0.0.9
+curated-transformers==0.1.1
+cymem==2.0.11
+dlinfo==2.0.0
+docopt==0.6.2
+espeakng-loader==0.2.4
+faster-whisper==1.2.0
+filelock==3.19.1
+flatbuffers==20181003210633
+fsspec==2025.9.0
+hf-xet==1.1.10
+huggingface_hub==0.35.0
+humanfriendly==10.0
+idna==3.10
+isodate==0.7.2
+Jinja2==3.1.6
+joblib==1.5.2
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+kittentts @ https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl
+langcodes==3.5.0
+language-tags==1.2.0
+language_data==1.3.0
+llvmlite==0.45.0
+marisa-trie==1.3.1
+markdown-it-py==4.0.0
+MarkupSafe==3.0.2
+mdurl==0.1.2
+misaki==0.9.4
+more-itertools==10.8.0
+mpmath==1.3.0
+murmurhash==1.0.13
+networkx==3.5
+num2words==0.5.14
+numba==0.62.0
+numpy==2.3.3
+onnxruntime==1.22.1
+openai-whisper==20250625
+packaging==25.0
+phonemizer-fork==3.3.2
+piper-tts==1.3.0
+preshed==3.0.10
+protobuf==6.32.1
+pycparser==2.23
+pydantic==2.11.9
+pydantic_core==2.33.2
+Pygments==2.19.2
+pyparsing==3.2.4
+python-dateutil==2.9.0.post0
+PyYAML==6.0.2
+rdflib==7.1.4
+referencing==0.36.2
+regex==2025.9.1
+requests==2.32.5
+rfc3986==1.5.0
+rich==14.1.0
+rpds-py==0.27.1
+segments==2.3.0
+shellingham==1.5.4
+six==1.17.0
+smart_open==7.3.1
+sounddevice==0.5.2
+soundfile==0.13.1
+spacy==3.8.7
+spacy-curated-transformers==0.3.1
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.5.1
+srt==3.5.3
+sympy==1.14.0
+termcolor==3.1.0
+thinc==8.3.6
+tiktoken==0.11.0
+tokenizers==0.22.0
+torch==2.8.0
+tqdm==4.67.1
+typer==0.17.4
+typing-inspection==0.4.1
+typing_extensions==4.15.0
+uritemplate==4.2.0
+urllib3==2.5.0
+vosk==0.3.45
+wasabi==1.1.3
+weasel==0.4.1
+websockets==15.0.1
+wrapt==1.17.3
diff --git a/Lab 3/server.py b/Lab 3/server.py
new file mode 100644
index 0000000000..ae33946c36
--- /dev/null
+++ b/Lab 3/server.py
@@ -0,0 +1,12 @@
+from flask import Flask
+
+app = Flask(__name__)
+
+@app.route('/')
+def index():
+ return 'Hello world'
+
+if __name__ == '__main__':
+ app.run(debug=True, host='0.0.0.0')
+
+
diff --git a/Lab 3/setup.sh b/Lab 3/setup.sh
new file mode 100644
index 0000000000..606ca0baab
--- /dev/null
+++ b/Lab 3/setup.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Function to print a message and install a package
+install_package() {
+ echo "Installing $1..."
+ shift # Shift to get the rest of the arguments
+ echo "Y" | "$@" # Run the command, piping 'Y' for approval
+ echo "$1 installed!"
+}
+
+# Install pip package
+# echo "Installing piper-tts via pip for local user..."
+# pip install piper-tts --user
+# echo "piper-tts installed!"
+
+# Install packages using apt-get
+install_package "festival" sudo apt-get install festival
+install_package "espeak" sudo apt-get install espeak
+install_package "mplayer" sudo apt-get install mplayer
+install_package "mpg123" sudo apt-get install mpg123
+install_package "libttspico-utils" sudo apt-get install libttspico-utils
+
+# Change all scripts in the subfolder 'speech-scripts' to be executable
+echo "Making all scripts in the 'speech-scripts' subfolder executable..."
+chmod u+x ./speech-scripts/*
+echo "Scripts are now executable!"
+
+echo "All tasks completed!"
diff --git a/Lab 3/speech-scripts/GoogleTTS_demo.sh b/Lab 3/speech-scripts/GoogleTTS_demo.sh
new file mode 100755
index 0000000000..b6728c73b6
--- /dev/null
+++ b/Lab 3/speech-scripts/GoogleTTS_demo.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# from https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)
+
+say() { local IFS=+;/usr/bin/mplayer -ao alsa -really-quiet -noconsolecontrols "http://translate.google.com/translate_tts?ie=UTF-8&client=tw-ob&q=$*&tl=en"; }
+#say $*
+say " This mission is too important for me to allow you to jeopardize it."
+
diff --git a/Lab 3/speech-scripts/check_words_example/recorded_mono.wav b/Lab 3/speech-scripts/check_words_example/recorded_mono.wav
new file mode 100644
index 0000000000..e3548201f3
Binary files /dev/null and b/Lab 3/speech-scripts/check_words_example/recorded_mono.wav differ
diff --git a/Lab 3/speech-scripts/check_words_example/test_words.py b/Lab 3/speech-scripts/check_words_example/test_words.py
new file mode 100644
index 0000000000..01922fe17f
--- /dev/null
+++ b/Lab 3/speech-scripts/check_words_example/test_words.py
@@ -0,0 +1,63 @@
+import os
+import wave
+import json
+import glob
+from vosk import Model, KaldiRecognizer
+
+final_text_buffer = []
+
+# Define cache model directory and check if the model is in cache
+cache_model_path = os.path.expanduser("~/.cache/vosk/vosk-model-small-en-us-0.15")
+if not os.path.exists(cache_model_path):
+ print("Please run the microphone_test.py first to download the model.")
+ exit(1)
+
+# Find the most recently created WAV file in the current directory
+wav_files = glob.glob('*.wav')
+if not wav_files:
+ print("No WAV files found in the current directory.")
+ exit(1)
+
+# Get the last created WAV file
+latest_wav_file = max(wav_files, key=os.path.getctime)
+
+# Load the latest WAV file
+wf = wave.open(latest_wav_file, "rb")
+if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
+ print("Audio file must be WAV format mono PCM.")
+ exit(1)
+
+# Set up recognizer with the model from the cache
+model = Model(cache_model_path)
+rec = KaldiRecognizer(model, wf.getframerate())
+
+# Process the audio file
+while True:
+ data = wf.readframes(4000)
+ if len(data) == 0:
+ break
+ if rec.AcceptWaveform(data):
+ print(rec.Result())
+ else:
+ print(rec.PartialResult())
+
+last = json.loads(rec.FinalResult()).get("text", "").strip()
+if last:
+ print(f"Final: {last}")
+ final_text_buffer.append(last)
+
+final_text = ""
+if final_text_buffer:
+ print("Transcript (joined):")
+ final_text = " ".join(final_text_buffer)
+
+print("Final Recognized Text: ", final_text)
+
+# Check if any of the predefined words are in the recognized text
+words_list = ["oh", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "zero"]
+
+for word in words_list:
+ if word in final_text.split():
+ print(f"The word '{word}' is in the recognized text.")
+ else:
+ print(f"The word '{word}' is not in the recognized text.")
diff --git a/Lab 3/speech-scripts/check_words_example/test_words_old.py b/Lab 3/speech-scripts/check_words_example/test_words_old.py
new file mode 100644
index 0000000000..e755c27c7c
--- /dev/null
+++ b/Lab 3/speech-scripts/check_words_example/test_words_old.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+from vosk import Model, KaldiRecognizer
+import sys
+import os
+import wave
+
+if not os.path.exists("model"):
+ print ("Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder.")
+ exit (1)
+
+wf = wave.open(sys.argv[1], "rb")
+if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
+ print ("Audio file must be WAV format mono PCM.")
+ exit (1)
+
+model = Model("model")
+# You can also specify the possible word list
+rec = KaldiRecognizer(model, wf.getframerate(), '["oh one two three four five six seven eight nine zero", "[unk]"]')
+
+while True:
+ data = wf.readframes(4000)
+ if len(data) == 0:
+ break
+ if rec.AcceptWaveform(data):
+ print(rec.Result())
+ else:
+ print(rec.PartialResult())
+
+print(rec.FinalResult())
diff --git a/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh b/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh
new file mode 100644
index 0000000000..f846a87a1f
--- /dev/null
+++ b/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh
@@ -0,0 +1,4 @@
+#arecord -f cd -r 16000 -d 5 -t wav recorded.wav && sox recorded.wav recorded_mono.wav remix 1,2
+
+arecord -D hw:2,0 -f cd -c1 -r 48000 -d 5 -t wav recorded_mono.wav
+python3 test_words.py recorded_mono.wav
diff --git a/Lab 3/speech-scripts/espeak_demo.sh b/Lab 3/speech-scripts/espeak_demo.sh
new file mode 100755
index 0000000000..5111e7cdc6
--- /dev/null
+++ b/Lab 3/speech-scripts/espeak_demo.sh
@@ -0,0 +1,4 @@
+
+# from https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)
+espeak -ven+f2 -k5 -s150 --stdout "I can make the Pi say anything at all" | aplay
+
diff --git a/Lab 3/speech-scripts/faster_whisper_try.py b/Lab 3/speech-scripts/faster_whisper_try.py
new file mode 100755
index 0000000000..f40b470d44
--- /dev/null
+++ b/Lab 3/speech-scripts/faster_whisper_try.py
@@ -0,0 +1,25 @@
+from faster_whisper import WhisperModel
+
+import time
+
+start_time = time.perf_counter()
+
+model_size = "tiny"
+
+# Run on GPU with FP16
+# model = WhisperModel(model_size, device="cuda", compute_type="float16")
+
+# or run on GPU with INT8
+# model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
+# or run on CPU with INT8
+model = WhisperModel(model_size, device="cpu", compute_type="int8")
+
+segments, info = model.transcribe("lookdave.wav", beam_size=5)
+
+for segment in segments:
+ print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
+
+end_time = time.perf_counter()
+
+elapsed_time = end_time - start_time
+print(f"Program executed in {elapsed_time:.6f} seconds")
diff --git a/Lab 3/speech-scripts/festival_demo.sh b/Lab 3/speech-scripts/festival_demo.sh
new file mode 100755
index 0000000000..75d6ab88fa
--- /dev/null
+++ b/Lab 3/speech-scripts/festival_demo.sh
@@ -0,0 +1,4 @@
+#from: https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)#Festival_Text_to_Speech
+
+echo "Just what do you think you're doing, Dave?" | festival --tts
+
diff --git a/Lab 3/speech-scripts/lookdave.wav b/Lab 3/speech-scripts/lookdave.wav
new file mode 100755
index 0000000000..dcacf676e8
Binary files /dev/null and b/Lab 3/speech-scripts/lookdave.wav differ
diff --git a/Lab 3/speech-scripts/my_greeting.sh b/Lab 3/speech-scripts/my_greeting.sh
new file mode 100755
index 0000000000..ff78e6a286
--- /dev/null
+++ b/Lab 3/speech-scripts/my_greeting.sh
@@ -0,0 +1 @@
+echo "Greetings, Jesse Iriah. Welcome to Lab Three." | espeak
diff --git a/Lab 3/speech-scripts/numerical_input.sh b/Lab 3/speech-scripts/numerical_input.sh
new file mode 100755
index 0000000000..efc4d4d3b7
--- /dev/null
+++ b/Lab 3/speech-scripts/numerical_input.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Use TTS to ask the question
+echo "Please state your 5-digit zip code now." | espeak
+
+# Record the response
+arecord -D hw:2,0 -f S16_LE -r 16000 -d 5 numerical_answer.wav
+
+echo "Recording complete. The audio is saved as numerical_answer.wav."
+
+
diff --git a/Lab 3/speech-scripts/pico2text_demo.sh b/Lab 3/speech-scripts/pico2text_demo.sh
new file mode 100755
index 0000000000..f80d7d59cb
--- /dev/null
+++ b/Lab 3/speech-scripts/pico2text_demo.sh
@@ -0,0 +1,4 @@
+# from https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)
+
+pico2wave -w lookdave.wav "Look Dave, I can see you're really upset about this." && aplay lookdave.wav
+
diff --git a/Lab 3/speech-scripts/recorded_mono.wav b/Lab 3/speech-scripts/recorded_mono.wav
new file mode 100755
index 0000000000..f4ad47cea0
Binary files /dev/null and b/Lab 3/speech-scripts/recorded_mono.wav differ
diff --git a/Lab 3/speech-scripts/test_microphone.py b/Lab 3/speech-scripts/test_microphone.py
new file mode 100755
index 0000000000..b37dfcd369
--- /dev/null
+++ b/Lab 3/speech-scripts/test_microphone.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env -S /home/pi/Interactive-Lab-Hub/Lab\ 3/.venv/bin/python
+
+
+# prerequisites: as described in https://alphacephei.com/vosk/install and also python module `sounddevice` (simply run command `pip install sounddevice`)
+# Example usage using Dutch (nl) recognition model: `python test_microphone.py -m nl`
+# For more help run: `python test_microphone.py -h`
+
+import argparse
+import queue
+import sys
+import sounddevice as sd
+
+from vosk import Model, KaldiRecognizer
+
+q = queue.Queue()
+
+def int_or_str(text):
+ """Helper function for argument parsing."""
+ try:
+ return int(text)
+ except ValueError:
+ return text
+
+def callback(indata, frames, time, status):
+ """This is called (from a separate thread) for each audio block."""
+ if status:
+ print(status, file=sys.stderr)
+ q.put(bytes(indata))
+
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument(
+ "-l", "--list-devices", action="store_true",
+ help="show list of audio devices and exit")
+args, remaining = parser.parse_known_args()
+if args.list_devices:
+ print(sd.query_devices())
+ parser.exit(0)
+parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ parents=[parser])
+parser.add_argument(
+ "-f", "--filename", type=str, metavar="FILENAME",
+ help="audio file to store recording to")
+parser.add_argument(
+ "-d", "--device", type=int_or_str,
+ help="input device (numeric ID or substring)")
+parser.add_argument(
+ "-r", "--samplerate", type=int, help="sampling rate")
+parser.add_argument(
+ "-m", "--model", type=str, help="language model; e.g. en-us, fr, nl; default is en-us")
+args = parser.parse_args(remaining)
+
+try:
+ if args.samplerate is None:
+ device_info = sd.query_devices(args.device, "input")
+ # soundfile expects an int, sounddevice provides a float:
+ args.samplerate = int(device_info["default_samplerate"])
+
+ if args.model is None:
+ model = Model(lang="en-us")
+ else:
+ model = Model(lang=args.model)
+
+ if args.filename:
+ dump_fn = open(args.filename, "wb")
+ else:
+ dump_fn = None
+
+ with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device,
+ dtype="int16", channels=1, callback=callback):
+ print("#" * 80)
+ print("Press Ctrl+C to stop the recording")
+ print("#" * 80)
+
+ rec = KaldiRecognizer(model, args.samplerate)
+ while True:
+ data = q.get()
+ if rec.AcceptWaveform(data):
+ print(rec.Result())
+ else:
+ print(rec.PartialResult())
+ if dump_fn is not None:
+ dump_fn.write(data)
+
+except KeyboardInterrupt:
+ print("\nDone")
+ parser.exit(0)
+except Exception as e:
+ parser.exit(type(e).__name__ + ": " + str(e))
diff --git a/Lab 3/speech-scripts/whisper_try.py b/Lab 3/speech-scripts/whisper_try.py
new file mode 100755
index 0000000000..5e8e36a678
--- /dev/null
+++ b/Lab 3/speech-scripts/whisper_try.py
@@ -0,0 +1,16 @@
+
+import time
+
+start_time = time.perf_counter()
+
+import whisper
+
+model = whisper.load_model("tiny")
+result = model.transcribe("lookdave.wav")
+
+print(result["text"])
+
+end_time = time.perf_counter()
+
+elapsed_time = end_time - start_time
+print(f"Program executed in {elapsed_time:.6f} seconds")
diff --git a/README.md b/README.md
index 7f60fa737e..571e0e7fd4 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,13 @@
-# [Your name here]'s-Lab-Hub
+# Jesse Iriah's-Lab-Hub
for [Interactive Device Design](https://github.com/FAR-Lab/Developing-and-Designing-Interactive-Devices/)
Please place links here to the README.md's for each of your labs here:
[Lab 1. Staging Interaction](Lab%201/)
-Lab 2. Interactive Prototyping: The Clock of Pi
+[Lab 2. Interactive Prototyping: The Clock of Pi](Lab%202/)
-Lab 3. Chatterboxes
+[Lab 3. Chatterboxes](Lab%203/)
Lab 4. Ph-UI!!!