diff --git a/Lab 1/Feedback/Feedback.pdf b/Lab 1/Feedback/Feedback.pdf new file mode 100644 index 0000000000..dbf02e79ee Binary files /dev/null and b/Lab 1/Feedback/Feedback.pdf differ diff --git a/Lab 1/Feedback/text b/Lab 1/Feedback/text new file mode 100644 index 0000000000..b19b4534c8 --- /dev/null +++ b/Lab 1/Feedback/text @@ -0,0 +1,11 @@ +I presented my seven storyboard ideas to my breakout room group. I explained the plan (setting, players, activity, and goals)for each storyboard and showed my 4–frame sketches. + +**Feedback I received:** +They liked that the interactions were very clear and easy to understand - like a storybook. +They suggested keeping the light colour consistent across frames to avoid confusion (e.g., yellow = warming, green = good, red = bad). +They mentioned that for the sunrise alarm, the gradual brightness idea was intuitive and could work well in real life e.g. at hotels or luxury apartments. + +**What I improved:** +I made sure the light colours in all storyboards were consistent and clearly indicated the right status. +I simplified my sketches to make them easier to follow (fewer background details). +I made the captions shorter and more direct so they fit neatly under each frame. diff --git a/Lab 1/Plan/Page 1.jpg b/Lab 1/Plan/Page 1.jpg new file mode 100644 index 0000000000..1c117c251b Binary files /dev/null and b/Lab 1/Plan/Page 1.jpg differ diff --git a/Lab 1/Plan/Page 2.jpg b/Lab 1/Plan/Page 2.jpg new file mode 100644 index 0000000000..d1b1ca509c Binary files /dev/null and b/Lab 1/Plan/Page 2.jpg differ diff --git a/Lab 1/Plan/Setting, players, activity and goals.pdf b/Lab 1/Plan/Setting, players, activity and goals.pdf new file mode 100644 index 0000000000..c52cea4452 Binary files /dev/null and b/Lab 1/Plan/Setting, players, activity and goals.pdf differ diff --git a/Lab 1/Plan/text b/Lab 1/Plan/text new file mode 100644 index 0000000000..676a3cc6e4 --- /dev/null +++ b/Lab 1/Plan/text @@ -0,0 +1,62 @@ +Storyboard 1 – Charging Status Indicator +Setting: A bedroom nightstand or desk, at night or during the day when the phone needs charging. +Players: The phone owner. +Activity: The user places their phone on the wireless charger and glances at the light to check charging progress. +Goals: +User: To know when the phone is fully charged without unlocking it. +Device: To communicate charging status clearly using light colour. + + +Storyboard 2 – Smart Entryway Weather Indicator +Setting: House entryway. +Players: The person living in the house (and potentially family/housemates). +Activity: The user checks the light above the door before heading outside. +Goals: +User: To know if they need an umbrella, coat, or other gear before going out. +Device: To indicate good or bad weather quickly. + + +Storyboard 3 – Remote Locator +Setting: Living room. +Players: The person watching TV. +Activity: The user notices the remote is missing, uses the app to trigger the light on the remote, and retrieves it. +Goals: +User: To find the remote quickly and avoid wasting time searching. +Device: To make the remote’s location obvious through a glowing light. + + +Storyboard 4 – Smart Mailbox +Setting: Outside the house or at the apartment mailbox area, any time mail is expected. +Players: Resident, mail carrier. +Activity: The resident looks at the mailbox light before walking to it. +Goals: +User: To avoid checking an empty mailbox unnecessarily or to check at night. +Device: To signal whether mail has been delivered (green = empty, red = full). 
+ + +Storyboard 5 – Smart Pot +Setting: Kitchen stove area. +Players: Home cook, pot, stove. +Activity: The user puts water to boil, and the pot detects temperature rise, turning on a light when boiling point is reached. +Goals: +User: To know when water reaches boiling without constantly watching. +Device: To visually alert user with light when boiling is complete. + + +Storyboard 6 – Sunrise Alarm Light +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise. +Goals: +User: To wake up more naturally and gently. +Device: To gradually prepare user’s body for waking by increasing light brightness. + + +Storyboard 7 – Fish Tank Monitor +Setting: Living room or wherever the fish bowl is placed, throughout the day. +Players: Fish, fish owner. +Activity: Light on the lid indicates water quality. Owner refills or cleans water when needed. +Goals: +User: To maintain healthy water for the fish and know when it needs changing. +Device: To alert user to water condition through light colour changes. + diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic new file mode 100644 index 0000000000..63859d97c3 Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg new file mode 100644 index 0000000000..8f08ce69cf Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Charger.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic new file mode 100644 index 0000000000..380fef4aae Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg new file mode 100644 index 0000000000..3e545f08de Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Phone.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC b/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC new file mode 100644 index 0000000000..4f3a679532 Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 1 - The Remote.HEIC differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg b/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg new file mode 100644 index 0000000000..714149bb9b Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 3 - The Remote.jpg differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic new file mode 100644 index 0000000000..50593a81ac Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.heic differ diff --git a/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg new file mode 100644 index 0000000000..ee9f55afbd Binary files /dev/null and b/Lab 1/Prototypes/Idea: Storyboard 6 - The Sunrise Light.jpg differ diff --git a/Lab 1/Prototypes/text b/Lab 1/Prototypes/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Prototypes/text @@ -0,0 +1 @@ + diff --git a/Lab 1/README.md b/Lab 1/README.md index 4686781725..612ea14dde 100644 --- a/Lab 1/README.md +++ b/Lab 1/README.md @@ -1,8 
+1,8 @@ - + # Staging Interaction -\*\***NAME OF COLLABORATOR HERE**\*\* +\*\***N/A**\*\* In the original stage production of Peter Pan, Tinker Bell was represented by a darting light created by a small handheld mirror off-stage, reflecting a little circle of light from a powerful lamp. Tinkerbell communicates her presence through this light to the other characters. See more info [here](https://en.wikipedia.org/wiki/Tinker_Bell). @@ -73,23 +73,105 @@ _Goals:_ What are the goals of each player? (e.g., jumping to a tree, opening th The interactive device can be anything *except* a computer, a tablet computer or a smart phone, but the main way it interacts needs to be using light. \*\***Describe your setting, players, activity and goals here.**\*\* +![Page 1](https://github.com/user-attachments/assets/281de04b-c0d5-43a3-9183-0e92ad9bc495) +![Page 2](https://github.com/user-attachments/assets/9605eb55-c705-44d5-9add-ed6d548c833c) + + +**Storyboard 1 – Charging Status Indicator** +Setting: A bedroom nightstand or desk, at night or during the day when the phone needs charging. +Players: The phone owner. +Activity: The user places their phone on the wireless charger and glances at the light to check charging progress. +Goals: + User: To know when the phone is fully charged without unlocking it. + Device: To communicate charging status clearly using light colour. + +**Storyboard 2 – Smart Entryway Weather Indicator** +Setting: House entryway. +Players: The person living in the house (and potentially family/housemates). +Activity: The user checks the light above the door before heading outside. +Goals: + User: To know if they need an umbrella, coat, or other gear before going out. + Device: To indicate good or bad weather quickly. + +**Storyboard 3 – Remote Locator** +Setting: Living room. +Players: The person watching TV. +Activity: The user notices the remote is missing, uses the app to trigger the light on the remote, and retrieves it. +Goals: + User: To find the remote quickly and avoid wasting time searching. + Device: To make the remote’s location obvious through a glowing light. + +**Storyboard 4 – Smart Mailbox** +Setting: Outside the house or at the apartment mailbox area, any time mail is expected. +Players: Resident, mail carrier. +Activity: The resident looks at the mailbox light before walking to it. +Goals: + User: To avoid checking an empty mailbox unnecessarily or to check at night. + Device: To signal whether mail has been delivered (green = empty, red = full). + +**Storyboard 5 – Smart Pot** +Setting: Kitchen stove area. +Players: Home cook, pot, stove. +Activity: The user puts water to boil, and the pot detects temperature rise, turning on a light when boiling point is reached. +Goals: + User: To know when water reaches boiling without constantly watching. + Device: To visually alert user with light when boiling is complete. + +**Storyboard 6 – Sunrise Alarm Light** +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise. +Goals: + User: To wake up more naturally and gently. + Device: To gradually prepare user’s body for waking by increasing light brightness. + +**Storyboard 7 – Fish Tank Monitor** +Setting: Living room or wherever the fish bowl is placed +Players: Fish, fish owner. +Activity: Light on the lid indicates water quality. Owner refills or cleans water when needed. +Goals: + User: To maintain healthy water for the fish and know when it needs changing. 
+ Device: To alert user to water condition through light colour changes. + + Storyboards are a tool for visually exploring a users interaction with a device. They are a fast and cheap method to understand user flow, and iterate on a design before attempting to build on it. Take some time to read through this explanation of [storyboarding in UX design](https://www.smashingmagazine.com/2017/10/storyboarding-ux-design/). Sketch seven storyboards of the interactions you are planning. **It does not need to be perfect**, but must get across the behavior of the interactive device and the other characters in the scene. \*\***Include pictures of your storyboards here**\*\* +![Storyboard 1 - Charging Status Indicator](https://github.com/user-attachments/assets/2fc15da1-2bc2-45c0-8185-03f0244b5669) +![Storyboard 2 - Weather Indicator](https://github.com/user-attachments/assets/d47a7809-f1bc-434e-ad5b-9565212fa008) +![Storyboard 3 - Remote Locator](https://github.com/user-attachments/assets/a7ae9e04-cf3a-405f-9f15-86f0933873ff) +![Storyboard 4 - Smart Mailbox](https://github.com/user-attachments/assets/956fbb86-fdc1-4dd9-bdaa-b9b84c715d0e) +![Storyboard 5 - Smart Pot](https://github.com/user-attachments/assets/9cc9c13b-d71e-4c6f-ae03-8ec9f0223ed6) +![Storyboard 6 - Sunrise Light](https://github.com/user-attachments/assets/d20818a9-ea7d-499b-85e8-0c63cab82e83) +![Storyboard 7 - Fish Tank Monitor](https://github.com/user-attachments/assets/af753d6f-f6a4-48cd-9c7e-2a9f70eb7b82) + Present your ideas to the other people in your breakout room (or in small groups). You can just get feedback from one another or you can work together on the other parts of the lab. \*\***Summarize feedback you got here.**\*\* +I presented my seven storyboard ideas to a small group. I explained the plan (setting, players, activity, and goals)for each storyboard and showed my 4–frame sketches. + +**Feedback I received:** +They liked that the interactions were very clear and easy to understand - like a storybook. +They suggested keeping the light colour consistent across frames to avoid confusion (yellow = warming/ neutral, green = good, red = bad). +They mentioned that for the sunrise alarm, the gradual brightness idea was intuitive and could work well in real life e.g. at hotels or luxury apartments. + +**What I improved:** +I made sure the light colours in all storyboards were consistent and clearly indicated the right status. +I simplified my sketches to make them easier to follow (fewer background details). +I made the captions shorter and more direct so they fit neatly under each frame. ## Part B. Act out the Interaction Try physically acting out the interaction you planned. For now, you can just pretend the device is doing the things you’ve scripted for it. -\*\***Are there things that seemed better on paper than acted out?**\*\* +\*\***Are there things that seemed better on paper than acted out?**\*\* +Yes, some transitions (like charging light going from red to green) needed to be slower and smoother to feel natural. On paper/storyboards it seemed fine to just “switch” colours due to the 'scene' changing, but a gradual transition is more intuitive in reality. -\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +Yes, I thought it would be helpful if the brightness or flash rate could indicate urgency (e.g. 
flashing faster/higher frequency when the user is very close to the lost remote). ## Part C. Prototype the device @@ -102,19 +184,39 @@ We invented this tool for this lab! If you run into technical issues with this tool, you can also use a light switch, dimmer, etc. that you can can manually or remotely control. -\*\***Give us feedback on Tinkerbelle.**\*\* +\*\***Give us feedback on Tinkerbelle.**\*\* +Tinkerbelle was successful in supporting colour changes during prototyping. An additional feature to allow users to input hex codes or directly manipulate RGB values would improve the precision when selecting specific colours and transitions. A feature to save frequently used colours as presets could also make switching between states more efficient during testing. - -## Part D. Wizard the device +## Part D. Wizard the device Take a little time to set up the wizarding set-up that allows for someone to remotely control the device while someone acts with it. Hint: You can use Zoom to record videos, and you can pin someone’s video feed if that is the scene which you want to record. \*\***Include your first attempts at recording the set-up video here.**\*\* -Now, hange the goal within the same setting, and update the interaction with the paper prototype. + + +Now, change the goal within the same setting, and update the interaction with the paper prototype. \*\***Show the follow-up work here.**\*\* +https://github.com/user-attachments/assets/57b08363-a2df-45cc-8f6b-99578a857abc + + +**Charging Status Indicator: Goal change → ** +Instead of checking whether the phone is fully charged, the user now needs to know if the device is charging at all (e.g. cable is loose, battery is faulty). Yellow previously indicated the device was charging and green indicated that charging was complete. Now, red indicates that no charge is taking place. + + + + + + + +https://github.com/user-attachments/assets/8f74b6bc-adff-4e45-b62c-a2bde9957c3b + + + + + ## Part E. Costume the device Only now should you start worrying about what the device should look like. Develop three costumes so that you can use your phone as this device. @@ -122,17 +224,70 @@ Only now should you start worrying about what the device should look like. Devel Think about the setting of the device: is the environment a place where the device could overheat? Is water a danger? Does it need to have bright colors in an emergency setting? 
\*\***Include sketches of what your devices might look like here.**\*\* +**Idea/ Storyboard 1: Charging Status Indicator** + +![Sketches](https://github.com/user-attachments/assets/69d3baec-2776-4484-b23a-71481cdd990f) + +The Phone and its Charger: +![Idea: Storyboard 1 - The Phone](https://github.com/user-attachments/assets/ac75540b-2053-46ba-bd3c-11218033fe85) +![Idea: Storyboard 1 - The Charger](https://github.com/user-attachments/assets/eba809ce-b1c7-41b5-97d2-c49f2342208f) + +**Idea / Storyboard 3: Remote Locator** +The Remote: +![Idea: Storyboard 3 - The Remote](https://github.com/user-attachments/assets/b6795614-13cd-494d-96e5-5767c0054016) + +**Idea / Storyboard 6: Sunrise Light** +The Sunrise Light: +![Idea: Storyboard 6 - The Sunrise Light](https://github.com/user-attachments/assets/6063264c-dfea-46c5-b328-622d8a5fa53a) \*\***What concerns or opportunitities are influencing the way you've designed the device to look?**\*\* +**Charging Status Indicator (Storyboard/Idea 1)** +The charger was sketched to resemble popular upright phone chargers, such as MagSafe or Anker stands, with a visible status light at the front. This familiar design makes it intuitive to use and easy to integrate into existing routines. +The phone costume was created by cutting a rectangle from paper to represent the phone and a battery-shaped cut-out in its centre to show charging status. A charger costume was built from cardboard as an open-roof box that the phone can sit inside. The front of the box has a circular cut-out where the Tinkerbelle phone’s light can shine through, displaying yellow for “charging” and green for “fully charged.” This design choise emphasises visibility of status and allows quick recognition of the current charging state. + +**Remote Locator (Storyboard/ Idea 3)** +The remote was kept close to a natural, familiar remote shape, since users are already accustomed to rectangular remotes. For the costume, the remote was first drawn on paper, then reinforced with cardboard so that it would feel more solid and resist bending over when placed in couch cushions. A circular cut-out was added to indicate where the light would appear. The decision to keep the shape and layout simple ensures that users immediately recognise it as a remote, while the white paper surface improves visibility. + +**Sunrise Light (Storyboard/ Idea 6)** +Two designs were sketched: one resembling a hanging bulb to act as a ceiling-mounted light, and another shaped like a cone that could sit on a bedside table. The cone design was chosen for the prototype because it can take on different colours and patterns (similar to star or galaxy projectors ) which creates an opportunity to make waking up a more enjoyable experience. The costume was made by folding an A4 sheet of paper into a cone to resemble a lamp shade. This setup diffuses light evenly, reduces glare, and clearly communicates its function as a sunrise or ambient light. + ## Part F. 
Record \*\***Take a video of your prototyped interaction.**\*\* +**Charging Status Indicator (Storyboard/Idea 1)** + + +https://github.com/user-attachments/assets/00925fbf-f1ee-49ee-a3be-b61e032543b3 + + + +**Remote Locator (Storyboard/ Idea 3)** + + + +https://github.com/user-attachments/assets/c1ff88bb-b948-4bd6-9daf-79846a9a05e3 + + + + +**Sunrise Light (Storyboard/ Idea 6)** + + + + +https://github.com/user-attachments/assets/540962fb-3488-4f18-ab74-328034f6c090 + + + + + \*\***Please indicate who you collaborated with on this Lab.**\*\* Be generous in acknowledging their contributions! And also recognizing any other influences (e.g. from YouTube, Github, Twitter) that informed your design. +All other group members dropped the course. # Staging Interaction, Part 2 @@ -144,7 +299,10 @@ This describes the second week's work for this lab activity. You will be assigned three partners from other groups. Go to their github pages, view their videos, and provide them with reactions, suggestions & feedback: explain to them what you saw happening in their video. Guess the scene and the goals of the character. Ask them about anything that wasn’t clear. -\*\***Summarize feedback from your partners here.**\*\* +\*\***Summarize feedback from your partners here.**\*\* +They found the captions on both the storyboards and videos very helpful for understanding the intended interactions. They thought the remote locator idea was particularly interesting and suggested it could be extended to other objects such as car keys, house keys, or wallets. They recommended adding vibration or sound feedback, as well as increasing the frequency of the light flashes, haptic pulses, or sound as the user gets closer to the object. + +The sunrise light idea received the most positive feedback. Students from other groups commented that both versions were great and that this approach could provide a gentler, more effective way to wake not just adults, but also children and pets. A suggested extension was to add a “disco mode” or allow the light to double as a projector for playful or decorative use when waking. ## Make it your own @@ -154,3 +312,64 @@ Do last week’s assignment again, but this time: 3) We will be grading with an emphasis on creativity. \*\***Document everything here. (Particularly, we would like to see the storyboard and video, although photos of the prototype are also great.)**\*\* + +## Part A. Plan +\*\***Describe your setting, players, activity and goals here.**\*\* +**Storyboard Plan – Sunrise Alarm** +Setting: Bedroom, early morning before wake-up time. +Players: User (sleeping person). +Activity: Light gradually brightens before wake-up time to simulate sunrise, while a soft chime plays to complement the visual cue. +Goals: + User: To wake up more naturally and gently. + Device: To gradually prepare the user’s body for waking by increasing light brightness and softly signalling with sound. + + +\*\***Include pictures of your storyboards here**\*\* +![Sunrise Alarm - Ceiling](https://github.com/user-attachments/assets/c38ce77f-56d0-4047-954d-2c99546f696a) +![Sunrise Alarm - Table](https://github.com/user-attachments/assets/9ab11e53-8d4f-4116-ad47-7589aff1a506) + + +\*\***Summarize feedback you got here.**\*\* +- Most people felt it would be gentle way to wake up, potentially useful even for children or pets. +- One suggestion was to add extra modes such as a “disco” feature or to have it double as a projector for decorative patterns. + + +## Part B. 
Act out the Interaction + +Try physically acting out the interaction you planned. For now, you can just pretend the device is doing the things you’ve scripted for it. + +\*\***Are there things that seemed better on paper than acted out?**\*\* +In addition to the gradual change in light brightness from dim to warm white, the accompanying music or chime should also transition smoothly. While the storyboard could show a simple “on/off” cue, implementing gradual changes in both light and sound creates a more natural and gentle wake-up experience. + +\*\***Are there new ideas that occur to you or your collaborator that come up from the acting?**\*\* +The combination of visual and auditory cues was identified as a way to make the interaction more engaging and effective, particularly for users who may be heavy sleepers or for waking children & pets gently. A vibration feature could also be beneficial for the table version. + +## Part C. Prototype the device +See Part F. + +## Part D. Wizard the device +See Part F. + +## Part E. Costume the device +\*\***Include sketches of what your devices might look like here.**\*\* +![Sunrise Alarm Sketches](https://github.com/user-attachments/assets/a4a74f88-6a21-422e-9c9f-b3e232502390) +![Idea: Storyboard 6 - The Sunrise Light](https://github.com/user-attachments/assets/6063264c-dfea-46c5-b328-622d8a5fa53a) +![Sunrise Alarm - Ceiling Prototoype](https://github.com/user-attachments/assets/2cb8dba3-71bc-4cb0-9577-f509aca1cf0d) + + +\*\***What concerns or opportunitities are influencing the way you've designed the device to look?**\*\* +Two designs were initially sketched: one resembling a hanging ceiling bulb, and another shaped like a cone that could sit on a bedside table. The hanging bulb design was chosen for the prototype, since the table-top cone was used in Lab 1a. It was constructed by laying strips of paper in a crisscross pattern — one vertical, one horizontal, and two along the diagonals — and then curving them upward to form a spherical shape. A thin strip of paper was attached to the top to resemble a hanging fixture, similar to a ceiling light. This design diffuses light evenly, reduces glare, and clearly communicates its function as a sunrise or ambient light (similar to the initial sketch), while also creating an opportunity to display soft patterns or colours for a pleasant wake-up experience. + + +## Part F. Record + +\*\***Take a video of your prototyped interaction.**\*\* + + +https://github.com/user-attachments/assets/9a37bf3d-536c-48ca-8d33-0971aa1814a7 + + + + +\*\***Please indicate who you collaborated with on this Lab.**\*\* +All other group members have dropped the course. 
diff --git a/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 b/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 new file mode 100644 index 0000000000..3aea958138 Binary files /dev/null and b/Lab 1/Recordings/Storyboard- Idea 6 Recording.mp4 differ diff --git a/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 b/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 new file mode 100644 index 0000000000..2bd7ac3e25 Binary files /dev/null and b/Lab 1/Recordings/Storyboard: Idea 1 Recording.mp4 differ diff --git a/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 b/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 new file mode 100644 index 0000000000..ba21a0d120 Binary files /dev/null and b/Lab 1/Recordings/Storyboard: Idea 3 Recording.mp4 differ diff --git a/Lab 1/Recordings/text b/Lab 1/Recordings/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Recordings/text @@ -0,0 +1 @@ + diff --git a/Lab 1/Sketches/Sketches.jpg b/Lab 1/Sketches/Sketches.jpg new file mode 100644 index 0000000000..21aafacd56 Binary files /dev/null and b/Lab 1/Sketches/Sketches.jpg differ diff --git a/Lab 1/Sketches/text b/Lab 1/Sketches/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Sketches/text @@ -0,0 +1 @@ + diff --git a/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg b/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg new file mode 100644 index 0000000000..e24aa3cae8 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 1 - Charging Status Indicator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg b/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg new file mode 100644 index 0000000000..d2f19fa2cd Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 2 - Weather Indicator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg b/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg new file mode 100644 index 0000000000..852db94de4 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 3 - Remote Locator.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg b/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg new file mode 100644 index 0000000000..f330f4ff7d Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 4 - Smart Mailbox.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg b/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg new file mode 100644 index 0000000000..236f0e68d3 Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 5 - Smart Pot.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg b/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg new file mode 100644 index 0000000000..b1cdcf2cab Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 6 - Sunrise Light.jpg differ diff --git a/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg b/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg new file mode 100644 index 0000000000..74ec07293d Binary files /dev/null and b/Lab 1/Storyboards/Storyboard 7 - Fish Tank Monitor.jpg differ diff --git a/Lab 1/Storyboards/text b/Lab 1/Storyboards/text new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 1/Storyboards/text @@ -0,0 +1 @@ + diff --git a/Lab 2/Extending the Pi.md b/Lab 2/Extending the Pi.md new file mode 100644 index 0000000000..d8f9242a7d --- /dev/null +++ b/Lab 2/Extending the Pi.md @@ -0,0 +1,123 @@ +# Extending the Pi + +To extend the Pi, we are using breakout boards that connect to the PI using a 
standard communication bus [I2C](https://learn.sparkfun.com/tutorials/i2c/all). [StemmaQT](https://learn.adafruit.com/introducing-adafruit-stemma-qt/what-is-stemma) and [Qwiic](https://www.sparkfun.com/qwiic#overview) use a standardized 4-pin connector to connect devices using the I2C protocol. + +The StemmaQT and I2C parts often have a fixed I2C address; to differentiate between similar parts, the devices often have pads that allow additional bits to be pulled high or low. The addresses are in [hexadecimal](https://learn.sparkfun.com/tutorials/hexadecimal/introduction) format, things like `0x6f`. This is the hexadecimal (or hex) representation of the decimal number `111`, which is represented as `1101111` in binary. You are not expected to make any kind of conversion, but you should have some conceptual grasp that a hex value is just a number shown another way. [This Python tutorial](https://towardsdatascience.com/binary-hex-and-octal-in-python-20222488cee1) will assist you if you need help manipulating hexadecimal numbers. + +## Connecting a Button + +The buttons you've used on the screen are quite simple. Aside from [debouncing](https://learn.adafruit.com/make-it-switch/debouncing), when you press down you are closing a circuit, allowing electricity to flow to the pins wired to the two buttons, in this case [GPIO 23](https://pinout.xyz/pinout/pin16_gpio23) and [24](https://pinout.xyz/pinout/pin18_gpio24). That's a perfectly reasonable way to connect a button. I2C is not typically used for buttons, but here we demonstrate one way you might see it. This also allows additional functionality to be built right into the button, such as the ability to remember the last time it was pressed. + +### Hardware + +From your kit, take out the [mini-PiTFT](https://learn.adafruit.com/adafruit-mini-pitft-135x240-color-tft-add-on-for-raspberry-pi), a [stemmaQT cable](https://www.adafruit.com/product/4210) and the [Qwiic Button](https://www.sparkfun.com/products/16842).
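A quick aside on the address notation above: a hex value like `0x6f` is just an ordinary integer written in base 16, which you can check in any Python shell (plain standard-library Python, nothing here is specific to the kit):

```python
# 0x6f, 111 and 0b1101111 are three spellings of the same number
address = 0x6f
print(address)               # 111
print(hex(address))          # '0x6f'
print(bin(address))          # '0b1101111'
print(int("6f", 16) == 111)  # True -- parsing the hex string gives the same value
```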

+ + + +

+ +Connect the one side of cable to the StemmaQT port on the underside of the PiTFT screen. It will only fit in one way, it should not require much force. + +

+ + +

+ +#### Setup +As before, connect to your Pi and activate your virtual environment. + +``` +ssh pi@ixe00 +pi@ixe00:~ $ source circuitpython/bin/activate +(circuitpython) pi@ixe00:~ $ +``` + +On the pi, Navigate to your interactive lab hub, pull changes from upstream, and install new packages. If you have [merge conflicts](https://www.atlassian.com/git/tutorials/using-branches/merge-conflicts), you need to resolve them. If you've never done this before ask people in your group for help. + +``` +(circuitpython) pi@ixe00:~$ cd Interactive-Lab-Hub +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ git remote add upstream https://github.com/FAR-Lab/Interactive-Lab-Hub.git +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ git pull upstream Fall2022 +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ git add . +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ git commit -m "merge" +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ git push +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub $ cd Lab\ 2/ +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub/Lab 2 $ pip install -r requirements.txt +``` + +#### Open source hardware and software + +This class uses a lot of material that is developed with the intention of being free, open and accessible. All of the parts you used for this lab openly post their code and schematics should others want to riff on these designs or learn how they work. You are encouraged to [take](https://learn.adafruit.com/adafruit-mini-pitft-135x240-color-tft-add-on-for-raspberry-pi/downloads) [a](https://www.raspberrypi.org/documentation/hardware/raspberrypi/schematics/rpi_SCH_4b_4p0_reduced.pdf) [look](https://github.com/sparkfun/Qwiic_Button). You may find that someone has solved your problems for you and neatly packed them in a [library](https://github.com/gmparis/CircuitPython_I2C_Button). Feel free to look at and use solutions that others have posted so long as you **always cite their contributions**. + +To demonstrate the button we are using this [CircuitPython library](https://github.com/gmparis/CircuitPython_I2C_Button). You can also try this [Sparkfun Library](https://github.com/sparkfun/Qwiic_Button_Py) which has slightly simpler syntax. The devices and sensors in your kit have libraries that will allow you to integrate them with your Pi using python. They also provide examples of usage. If you are unsure about how to use something, look at its documentation then ask your TAs. + +Try running `python library_example.py`. + +Some important things to note from the code: + + * We create an I2C device to handle communication with the pi. + * We then scan for devices on the bus. + * We check if `default_addr = 0x6f` is listed in the found devices. This is the address your button comes programmed with, you can also change this and have it store the update on the button. + * Once we initialize the I2C_Button object the rest of the code shows us some of the builtin capabilities. + +## If it doesn't work + +The chances are that running `python library_example.py` works for a few seconds, before it returns 'OSError: [Errno 121] Remote I/O error'. This problem has to do with the I2C baudrate setting. If you run into this problem, you need to edit the /boot/config.txt file by typing: + +`sudo nano /boot/config.txt` + +- Search for where it says `dtparam=i2c_arm=on`, add a line beneath that that says `dtparam=i2c_arm_baudrate=10000`. +- If your `dtparam=i2c_arm=on` is commented out with a `#`, remove the `#` in front of `dtparam=i2c_arm=on`. 
+- Save the file and reboot your Pi with `sudo shutdown -r now`. + +After rebooting, `python library_example.py` should work. If it seems to glitch the first time, exit the script with Ctrl+C and run the same command again. + +## Under the I2C curtain (optional: complete only after working on your projects in groups) + +Run the file `I2C_scan.py` and the output should look like: + +``` +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub/Lab 2 $ python I2C_scan.py +I2C ok! +I2C addresses found: [] +``` + +Now plug the other end of the cable into the ports on the right of the button board. The pwr LED should turn on. Run the file again and you should see the device ID. You can also try daisy chaining multiple devices and sensors and running again. + +``` +(circuitpython) pi@ixe00:~/Interactive-Lab-Hub/Lab 2 $ python I2C_scan.py +I2C ok! +I2C addresses found: ['0x6f'] +``` +#### Read device registers + +With I2C devices we can read the registers directly with `button_registers.py`. Run the command to see what the current registers for the button are. You can look [here](https://cdn.sparkfun.com/assets/learn_tutorials/1/1/0/8/Qwiic_Button_I2C_Register_Map.pdf) to try and figure out what the output means. + +#### Leverage abstraction + +Using a higher-level device interface can make reading and writing registers for I2C devices easier. Try running `button_device.py` and pressing the button. Look at the code and the [list of registers](https://cdn.sparkfun.com/assets/learn_tutorials/1/1/0/8/Qwiic_Button_I2C_Register_Map.pdf) and see if you can figure out what line 56 is for. + +``` +56 write_register(device, STATUS, 0) +``` + +#### Connecting more than one button + +The more buttons the merrier! ...but how do you control them individually when they all come with the same default I2C address `0x6f`? + +Option 1 - Software: Look into the [list of registers](https://cdn.sparkfun.com/assets/learn_tutorials/1/1/0/8/Qwiic_Button_I2C_Register_Map.pdf) of the buttons again: is it possible to change the I2C address through software programming? + +Option 2 - Hardware: Look at the bottom right corner of the back of the button; you should be able to locate an ADR marking with A0 - A3 jumpers. By soldering these I2C address jumpers, you can actually change the address directly! Check [here](https://learn.sparkfun.com/tutorials/sparkfun-qwiic-button-hookup-guide/all) to see how the I2C address changes! + + + +## Connecting a Sensor + +Your kit is full of sensors! Look up what they can do and feel free to ask your TAs, we love to talk sensors. We will go further in depth into sensors in the coming weeks, but we put this small sample here to demonstrate how you can get sensor data if you want to use it for your project this week. + +We are going to connect the [Adafruit APDS9960 Proximity, Light, RGB, and Gesture Sensor](https://www.adafruit.com/product/3595). You can leave the button plugged in and daisy-chain the sensor; this is part of the magic of I2C. + + + + +Now run `python proximity.py`. What did you see? Check out [here](https://learn.adafruit.com/adafruit-apds9960-breakout/circuitpython) to learn more about the sensor and think about how you might be able to apply it in future projects!
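The lab ships its own `proximity.py`, but to get a feel for what reading this sensor involves, here is a minimal sketch using Adafruit's published CircuitPython driver (`adafruit-circuitpython-apds9960`); the class and property names below follow that library's documentation, not the course script itself:

```python
import time
import board
from adafruit_apds9960.apds9960 import APDS9960

# The APDS9960 shares the I2C bus with the Qwiic Button (daisy-chained over StemmaQT/Qwiic).
i2c = board.I2C()
sensor = APDS9960(i2c)
sensor.enable_proximity = True  # the proximity engine is off until you enable it

while True:
    # proximity is an 8-bit reading: 0 when nothing is nearby,
    # rising toward 255 as an object approaches the sensor window
    print("proximity:", sensor.proximity)
    time.sleep(0.2)
```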
diff --git a/Lab 2/I2C_scan.py b/Lab 2/I2C_scan.py new file mode 100644 index 0000000000..b6bf377e1f --- /dev/null +++ b/Lab 2/I2C_scan.py @@ -0,0 +1,14 @@ +import board +import busio + +# Try to create an I2C device +i2c = busio.I2C(board.SCL, board.SDA) +print("I2C ok!") +# ids = '\n'.join(map(str,i2c.scan())) +# print(f"I2C device ID's found:\n{ids}") + +while not i2c.try_lock(): + pass + +print("I2C addresses found:", [hex(device_address) for device_address in i2c.scan()]) +i2c.unlock() \ No newline at end of file diff --git a/Lab 2/Other ways to connect IxE to your computer.md b/Lab 2/Other ways to connect IxE to your computer.md new file mode 100644 index 0000000000..9c074b2663 --- /dev/null +++ b/Lab 2/Other ways to connect IxE to your computer.md @@ -0,0 +1,299 @@ + +## Connect IxE to your computer via the computer Ethernet port + +(based off of instructions from [Nikmart's IxE Git](https://github.com/nikmart/interaction-engine/wiki/Connect-IxE-to-your-computer-via-Ethernet-port)) + +## Connecting to The HOUSE Wifi + +1. Register the MAC address of your Raspberry Pi on The House network at https://selfcare.boingohotspot.net/login using Add a Device. +1. Edit the `/etc/wpa_supplicant/wpa_supplicant.conf` file with `nano` OR on the `\boot` volume that you see when the SD card is plugged into your computer, is a file called: `wpa_supplicant.conf.bak`. Duplicate the file and rename the duplicate to `wpa_supplicant.conf`. Now edit the duplicated file (`wpa_supplicant.conf`) and add the house wifi to the list of networks to connect to as shown below. Then safely eject the sd card, plug it back into the Pi and power it back up. +1. The section you need to add is +```shell +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="The House" + key_mgmt=NONE +} +``` + + +Afterward, your file should look something like the following. + +```shell +update_config=1 +country=US + +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="The House" + key_mgmt=NONE +} + +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="DeviceFarm" + psk="device@theFarm" + key_mgmt=WPA-PSK +} + +``` +3. Try logging into your device using ssh from a terminal. +4. If you need to see what device your IxE is on, use `iwconfig` or find it in this list [here](https://interactivedevice18.slack.com/files/U90LA9TLH/F92HXB020/ixe_ip_mac_hostname.xlsx): + +```shell +pi@ixe42:~ $ iwconfig wlan0 +wlan0 IEEE 802.11 ESSID:"The House" + Mode:Managed Frequency:2.462 GHz Access Point: 24:79:2A:21:58:C8 + Bit Rate=72.2 Mb/s Tx-Power=31 dBm + Retry short limit:7 RTS thr:off Fragment thr:off + Power Management:on + Link Quality=67/70 Signal level=-43 dBm + Rx invalid nwid:0 Rx invalid crypt:0 Rx invalid frag:0 + Tx excessive retries:0 Invalid misc:0 Missed beacon:0 +``` + + + + + + + + + +### Instructions for Mac + +1. Plug an ethernet cable from your Mac to the Raspberry Pi (note you may need to use a Thunderbolt to Ethernet or USB to Ethernet adapter if your Mac does not have a built-in Ethernet port). + +2. Check that the IxE are getting a self-assigned IP in System Preferences -> Network. It should have an orange color. + +3. To get Internet on your Pi, use Internet Sharing and share your Wifi with your Ethernet. (Note: This will not work on 802.11X like eduroam. If you are trying to do this on campus, connect to Cornell Visitor and then share your wifi) + +3. 
Try pinging your IxE with the .local extension: ping ixe05.local + +If the ping works, you can ssh in just like normal. + +### Instructions for PC + +[someone with a pc, please update this...] + +## Connect IxE to your computer via a separate WiFi card + +You can share a WiFi connection to the wider internet from your laptop if you can bring up a separate WiFi interface on your computer (for example by using a USB WiFi adapter). + +### Instructions for Mac + +1. Bring up the new WiFi interface. This will likely involve installing the drivers for the device, registering the new interface (for example, by using http://mycomputers.cit.cornell.edu at Cornell), and getting it online. + +1. Go to the Sharing control panel to enable Internet sharing from your newly installed interface to the WiFi network which you will share locally. Go to WiFi Options to configure your network to be named DeviceFarm, and the WPA2 password to be the DeviceFarm password. Finally, check Internet Sharing to turn the sharing on. + +1. Power up your IxE. It should come up on your local network, and you should be able to access it via ssh like you would on the class network. + +[someone with a pc, please update this...] + +## Connect your IxE to your own WiFi + +Based on instructions found here: [https://howchoo.com/g/ndy1zte2yjn/how-to-set-up-wifi-on-your-raspberry-pi-without-ethernet](https://howchoo.com/g/ndy1zte2yjn/how-to-set-up-wifi-on-your-raspberry-pi-without-ethernet) + +If you have a WiFi router at home that you control, you can connect to it by setting the wifi configuration of your Pi. To do this: + +1. Use a text editor on your computer to create a file called `wpa_supplicant.conf` with the following text in it: + +```shell +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="DeviceFarm" + psk="device@theFarm" + key_mgmt=WPA-PSK +} + +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="YOUR WIFI NAME HERE" + psk="YOUR WIFI PASSWORD HERE" + key_mgmt=WPA-PSK +} +``` +2. Plug the SD card with the IxE image on it into your computer. +You should see a disk drive called `boot` mount to your computer. + +3. Open `boot` and copy the `wpa_supplicant.conf` file into the directory. + +4. Safely eject the SD card from your computer. + +5. Plug the SD card back into your IxE, then plug it into USB power. + +When the Pi boots up, it will copy the `wpa_supplicant.conf` file into the WiFi settings directory in `/etc/wpa_supplicant/`. This will update your WiFi settings and should get the Pi on your home wifi. + + + +## Connecting to RedRover +You can get your Pi working on Cornell's `RedRover` network by: + +### Registering your Pi's MAC address to your Cornell account at: [https://dnsdb.cit.cornell.edu/dnsdb-cgi/mycomputers.cgi](https://dnsdb.cit.cornell.edu/dnsdb-cgi/mycomputers.cgi) + +You can find your MAC address using the spreadsheet (IXE_IP_MAC_HOSTNAME) we provided on the class Slack. The MAC address is associated with your ixe hostname in the form ixe[00] where [00] are your numbers. + +Register your MAC address as one of your devices. We recommend you name it ixe[00] so you know which registration this is for. + +### Adding a python script to your machine to email the ixe's IP to you + +1. While you are logged into your Pi (from DeviceFarm, The House, or through ethernet), create a new file for the `python` script that will email the IP to you + +```shell +nano startup_mailer.py +``` + +2.
Copy and paste this python code into the editor + +```python +import subprocess +import smtplib +import socket +from email.mime.text import MIMEText +import datetime + +# Change to your own account information +to = 'YOUREMAIL@DOMAIN.com' +gmail_user = 'interactiveDeviceDesign@gmail.com' +gmail_password = 'device@theFarm' +smtpserver = smtplib.SMTP('smtp.gmail.com', 587) +smtpserver.ehlo() +smtpserver.starttls() +smtpserver.ehlo() +smtpserver.login(gmail_user, gmail_password) +today = datetime.date.today() + +# Very Linux Specific +arg='ip route list' +p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE) +data = p.communicate() +split_data = data[0].decode().split()  # stdout is bytes in Python 3; decode before splitting +ipaddr = split_data[split_data.index('src')+1] +my_ip = 'ixe[00] ip is %s' % ipaddr +msg = MIMEText(my_ip) +msg['Subject'] = 'IP for ixe58 on %s' % today.strftime('%b %d %Y') +msg['From'] = gmail_user +msg['To'] = to +smtpserver.sendmail(gmail_user, [to], msg.as_string()) +smtpserver.quit() +``` + +This script is set up with our class GMail account, `interactiveDeviceDesign@gmail.com`. We recommend you use this so that you do not need to store your own GMail password in clear text. + +3. Look for the line `to = 'YOUREMAIL@DOMAIN.com'` and replace the email address with your email. Any email like your GMail or Cornell Email should work fine. + +4. Put your ixe's number in the lines `my_ip = 'ixe[00] ip is %s' % ipaddr` and `msg['Subject'] = 'IP For ixe58 on %s' % today.strftime('%b %d %Y')` replacing the `[00]` with your number. + +4. Save the file and exit `nano` (using Ctrl+X, then choosing `yes`, then saving to `startup_mailer.py`) + +5. Test the python code by running `python /home/pi/startup_mailer.py`. You should get an email with your IP address in about a minute. + +The email should look like this: + +```text +From: interactivedevicedesign@gmail.com +To: YOUREMAIL@DOMAIN.com + +ixe[00] ip is xxx.xxx.xxx.xxx <-- this will be your ixe number and the IP it has currently +``` + +**NOTE: A RedRover IP will be on 10.xxx.xxx.xxx. If you get something like 192.xxx.xxx.xxx then you are probably connected to `DeviceFarm`** + +6. Tell your Pi to run the `startup_mailer.py` code when your pi reboots using `cron` (a [cool Unix tool](https://en.wikipedia.org/wiki/Cron) that allows you to automate things on your machine) + +```shell +crontab -e +``` + +If `cron` asks you to choose an editor, we recommend choosing option `2 - nano` + +Once you are in `nano` you will edit the `crontab` file which lets you schedule when to run certain things + +``` +# Edit this file to introduce tasks to be run by cron. +# +# Each task to run has to be defined through a single line +# indicating with different fields when the task will be run +# and what command to run for the task +# +# To define the time you can provide concrete values for +# minute (m), hour (h), day of month (dom), month (mon), +# and day of week (dow) or use '*' in these fields (for 'any').# +# Notice that tasks will be started based on the cron's system +# daemon's notion of time and timezones. +# +# Output of the crontab jobs (including errors) is sent through +# email to the user the crontab file belongs to (unless redirected).
+# +# For example, you can run a backup of all your user accounts +# at 5 a.m every week with: +# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ +# +# For more information see the manual pages of crontab(5) and cron(8) +# +# m h dom mon dow command +``` + +Add the following line to the bottom of the file (make sure there is no `#` symbol as this makes the line a comment) + +``` +@reboot sleep 30 && python /home/pi/startup_mailer.py +``` + +This line tells your Pi to run `python /home/pi/startup_mailer.py` when your machine reboots. The `sleep 30` is there to give your Pi 30 seconds to wake up and load all the system resources before it emails you your IP (we have found that not having the sleep delay means the script does not send an email, probably because the Pi doesn't have an IP). + +Save and exit `nano` (using `Ctrl+X`, `yes`) + +7. Edit your `wpa_supplicant.conf` WiFi settings + +```shell +sudo nano /etc/wpa_supplicant/wpa_supplicant.conf +``` + +Add the following lines to the top of the file, above the `DeviceFarm` settings if you would prefer it to use `RedRover` before using `DeviceFarm` + +```text +ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +network={ + ssid="RedRover" + key_mgmt=NONE +} +``` + +You can also comment out `DeviceFarm` settings so that you only connect to `RedRover`. Put `#` before all the lines for the `DeviceFarm` config settings. + +```text +#ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev +#network={ +# ssid="DeviceFarm" +# psk="device@theFarm" +# key_mgmt=WPA-PSK +#} +``` + +(If something goes wrong, you can always reset your WiFi settings using the `wpa_supplicant.conf.bak` file in the `boot` directory.) + +Save and exit `nano` (`Ctrl+X`, `yes`) + +8. Reboot your Pi using `sudo reboot`. If everything is configured correctly, you should get an email with your IP within a minute or two. + +### Connecting to your Pi using the IP it has with your laptop on `RedRover` or `eduroam` +1. Once you receive the email from you Pi, copy the IP address. + +**NOTE: A RedRover IP will be on 10.xxx.xxx.xxx. If you get something like 192.xxx.xxx.xxx then you are probably connected to `DeviceFarm`** + +2. Make sure your laptop is connected to `RedRover` or `eduroam` (`Cornell Visitor` will not work) + +#### On Mac/Linux +Open your Terminal (on Mac/Linux) or PuTTY (on Windows) and ssh using the IP address from the email + +```shell +ssh pi@xx.xx.xx.xx +``` + +#### On Windows +Use the IP from the email as as the location instead of `ixe[00]`. Make sure the `Port` is set to `22` + +3. You can access the webpage running on port `8000` (in our examples like `helloYou`) by going to the IP address then port 8000 iun your browser window + +`ex: 10.148.131.xxx:8000` diff --git a/Lab 2/PlacingMiniPiTFTonPi.jpg b/Lab 2/PlacingMiniPiTFTonPi.jpg new file mode 100644 index 0000000000..dfa25a7e50 Binary files /dev/null and b/Lab 2/PlacingMiniPiTFTonPi.jpg differ diff --git a/Lab 2/README.md b/Lab 2/README.md new file mode 100644 index 0000000000..192134e3d0 --- /dev/null +++ b/Lab 2/README.md @@ -0,0 +1,277 @@ +# Interactive Prototyping: The Clock of Pi +**NAMES OF COLLABORATORS HERE** + +Does it feel like time is moving strangely during this semester? + +For our first Pi project, we will pay homage to the [timekeeping devices of old](https://en.wikipedia.org/wiki/History_of_timekeeping_devices) by making simple clocks. + +It is worth spending a little time thinking about how you mark time, and what would be useful in a clock of your own design. 
+ +**Please indicate anyone you collaborated with on this Lab here.** +Be generous in acknowledging their contributions! And also recognizing any other influences (e.g. from YouTube, Github, Twitter) that informed your design. + +## Prep + +Lab Prep is extra long this week. Make sure to start this early for lab on Thursday. + +1. ### Set up your Lab 2 Github + +Before the start of lab Thursday, ensure you have the latest lab content by updating your forked repository. + +**📖 [Follow the step-by-step guide for safely updating your fork](pull_updates/README.md)** + +This guide covers how to pull updates without overwriting your completed work, handle merge conflicts, and recover if something goes wrong. + + +2. ### Get Kit and Inventory Parts +Prior to the lab session on Thursday, take inventory of the kit parts that you have, and note anything that is missing: + +***Update your [parts list inventory](partslist.md)*** + +3. ### Prepare your Pi for lab this week +[Follow these instructions](prep.md) to download and burn the image for your Raspberry Pi before lab Thursday. + + + + +## Overview +For this assignment, you are going to + +A) [Connect to your Pi](#part-a) + +B) [Try out cli_clock.py](#part-b) + +C) [Set up your RGB display](#part-c) + +D) [Try out clock_display_demo](#part-d) + +E) [Modify the code to make the display your own](#part-e) + +F) [Make a short video of your modified barebones PiClock](#part-f) + +G) [Sketch and brainstorm further interactions and features you would like for your clock for Part 2.](#part-g) + +## The Report +This readme.md page in your own repository should be edited to include the work you have done. You can delete everything but the headers and the sections between the \*\*\***stars**\*\*\*. Write the answers to the questions under the starred sentences. Include any material that explains what you did in this lab hub folder, and link it in the readme. + +Labs are due on Mondays. Make sure this page is linked to on your main class hub page. + +## Part A. +### Connect to your Pi +Just like you did in the lab prep, ssh onto your Pi. Once you get there, create a Python environment (named venv) by typing the following commands. + +``` +ssh pi@ +... +pi@raspberrypi:~ $ python -m venv venv +pi@raspberrypi:~ $ source venv/bin/activate +(venv) pi@raspberrypi:~ $ + +``` +### Setup Personal Access Tokens on GitHub +Set your git name and email so that commits appear under your name. +``` +git config --global user.name "Your Name" +git config --global user.email "yourNetID@cornell.edu" +``` + +Support for password authentication on GitHub was removed on August 13, 2021. That is, in order to link and sync your own lab-hub repo with your Pi, you will have to set up a "Personal Access Token" to act as the password for your GitHub account on your Pi when using git commands such as `git clone` and `git push`. + +Follow the steps listed [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) from GitHub to set up a token. Depending on your preference, you can set up and select the scopes, or permissions, you would like to grant the token. This token will act as your GitHub password later when you use the terminal on your Pi to sync files with your lab-hub repo. +![IMG_2161 2](https://github.com/user-attachments/assets/3a6a68f2-b9df-47df-a297-efc6b8bf0133) + + +Screenshot 2025-09-15 at 10 56 14 + + +## Part B.
+### Try out the Command Line Clock +Clone your own lab-hub repo for this assignment to your Pi and change the directory to the Lab 2 folder (remember to replace the following command line with your own GitHub ID): + +``` +(venv) pi@raspberrypi:~$ git clone https://github.com//Interactive-Lab-Hub.git +(venv) pi@raspberrypi:~$ cd Interactive-Lab-Hub/Lab\ 2/ +``` +Depending on the setting, you might be asked to provide your GitHub user name and password. Remember to use the "Personal Access Token" you just set up as the password instead of your account one! + +Check that the repository has cloned successfully; you should see Interactive-Lab-Hub listed under the home directory: +``` +(venv) pi@raspberrypi:~ $ ls +Bookshelf Documents Music Public venv +create_img.sh Downloads pi-apps screen_boot_script.py Videos +Desktop Interactive-Lab-Hub Pictures Templates +(venv) pi@raspberrypi:~ $ +``` + + +Install the packages from the requirements.txt and run the example script `cli_clock.py`: + +``` +(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ pip install -r requirements.txt +(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ python cli_clock.py +02/24/2021 11:20:49 +``` + +The terminal should show the time, and you can press `ctrl-c` to exit the script. +If you are unfamiliar with the Python code in `cli_clock.py`, have a look at [this Python refresher](https://hackernoon.com/intermediate-python-refresher-tutorial-project-ideas-and-tips-i28s320p). If you are still concerned, please reach out to the teaching staff! +Screenshot 2025-09-15 at 10 54 00 + + +## Part C. +### Set up your RGB Display +We have asked you to equip the [Adafruit MiniPiTFT](https://www.adafruit.com/product/4393) on your Pi in the Lab 2 prep already. Here, we will introduce you to the MiniPiTFT and Python scripts on the Pi in more detail. + + + +The Raspberry Pi 4 has a variety of interfacing options. When you plug the Pi in, the red power LED turns on. Any time the SD card is accessed, the green LED flashes. It has standard USB ports and HDMI ports. Less familiar is its set of 20x2 pin headers that allows you to connect various peripherals. + + + +To learn more about any individual pin and what it is for, go to [pinout.xyz](https://pinout.xyz/pinout/3v3_power) and click on the pin. Some terms may be unfamiliar but we will go over the relevant ones as they come up. + +### Hardware (you have already done this in the prep) + +From your kit take out the display and the [Raspberry Pi 5](https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.raspberrypi.com%2Fproducts%2Fraspberry-pi-5%2F&psig=AOvVaw330s4wIQWfHou2Vk3-0jUN&ust=1757611779758000&source=images&cd=vfe&opi=89978449&ved=0CBMQjRxqFwoTCPi1-5_czo8DFQAAAAAdAAAAABAE) + +Line up the screen and press it on the headers. The hole in the screen should match up with the hole on the raspberry pi.

+ + +

+
+### Testing your Screen
+
+The display uses a communication protocol called [SPI](https://www.circuitbasics.com/basics-of-the-spi-communication-protocol/) to communicate with the Raspberry Pi. We won't go into depth on how SPI works in this course. The port on the bottom of the display connects to the SDA and SCL pins used for the I2C communication protocol, which we will cover later. GPIO (General Purpose Input/Output) pins 23 and 24 are connected to the two buttons on the left, and GPIO 22 controls the display backlight.
+
+To show you the IP and MAC address of the Pi so you can connect remotely, we created a service that launches a Python script on boot. For the following steps, stop the service by typing `sudo systemctl stop piscreen.service --now`. Otherwise two scripts will try to use the screen at once. You may start it again by typing `sudo systemctl start piscreen.service --now`
+
+We can test the screen by typing
+```
+(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ python screen_test.py
+```
+
+You can type the name of a color and then press either of the buttons on the MiniPiTFT to see what happens on the display! You can press `ctrl-c` to exit the script. Take a look at the code with
+```
+(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ cat screen_test.py
+```
+![IMG_2221](https://github.com/user-attachments/assets/23465f80-3e5c-4b91-9568-6c9729cf27a5)
+
+
+#### Displaying Info with Text
+You can look in `screen_boot_script.py` for how to display text on the screen!
+![IMG_2229](https://github.com/user-attachments/assets/9ff7c16c-5ab2-48c3-8eca-67d24da58381)
+
+
+#### Displaying an Image
+
+You can look in `image.py` for an example of how to display an image on the screen. Can you make it switch to another image when you push one of the buttons?
+
+![IMG_2230](https://github.com/user-attachments/assets/91fe7341-223f-49fb-9eff-d7c3a7fd9ede)
+
+
+## Part D.
+### Set up the Display Clock Demo
+Work on `screen_clock.py` and try to show the time by filling in the while loop (at the bottom of the script, where we noted "TODO" for you). You can use the code in `cli_clock.py` and `stats.py` to figure this out.
+
+### How to Edit Scripts on Pi
+Option 1. One way to edit scripts on the Pi through the terminal is the [`nano`](https://linuxize.com/post/how-to-use-nano-text-editor/) command. You can open `screen_clock.py` by typing the following command:
+```
+(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ nano screen_clock.py
+```
+You can make changes to the script this way; remember to save them by pressing `ctrl-o` and then enter. Press `ctrl-x` to exit nano. More options are listed at the bottom of the terminal while you are in nano.
+
+Option 2. Another way to edit scripts is to use VNC on your laptop to connect remotely to your Pi. Open the files directly, as you would on your laptop, and edit them. Since the default OS image does not come with a Python IDE, you will have to install one yourself; otherwise you will be editing code in a plain text editor. [Thonny IDE](https://thonny.org/) is a good option. To install it, run the following commands in your Pi's terminal:
+
+ ```
+ pi@raspberrypi:~ $ sudo apt update && sudo apt upgrade -y
+ pi@raspberrypi:~ $ sudo apt install thonny
+ ```
+
+Now you should be able to edit Python scripts with Thonny on your Pi.
+
+Option 3. 
A nowadays often preferred method is to use Microsoft [VS code to remote connect to the Pi](https://www.raspberrypi.com/news/coding-on-raspberry-pi-remotely-with-visual-studio-code/). This gives you access to a fullly equipped and responsive code editor with terminal and file browser. + +Pro Tip: Using tools like [code-server](https://coder.com/docs/code-server/latest) you can even setup a VS Code coding environment hosted on your raspberry pi and code through a web browser on your tablet or smartphone! + +![IMG_2218](https://github.com/user-attachments/assets/091d776a-2c09-4ee7-b8d9-57fd50aa8619) + + +## Part E. Now moved to Lab2 Part 2. + +## Part F. Now moved to Lab2 Part 2. + +## Part G. +## Sketch and brainstorm further interactions and features you would like for your clock for Part 2. +1. Say Hello +2. Display the time +3. Display the date +4. Display the weather (temperature) +5. Display the location (city and country) +6. Have a key/manual/ way to describe how it works + +# Prep for Part 2 + +1. Pick up remaining parts for kit on Thursday lab class. Check the updated [parts list inventory](partslist.md) and let the TA know if there is any part missing. + + +2. Look at and give feedback on the Part G. for at least 2 other people in the class (and get 2 people to comment on your Part G!) + The feedback I received on Part G was very helpful. I was advised that while adding features like date and location was a good idea, it was more important to ensure the time was clearly visible, possibly with a key. It was also suggested that I consider the image size for all designs since the screen is so small (it may not be able to include all features). + +After observing my sketch, my classmate suggested an additional feature: adding dotted lines from the 'X' to both the x and y axes to make it easier to read. B + +The feedback I received was from Iqra and Kyle. + +# Lab 2 Part 2 + +## Assignment that was formerly Lab 2 Part E. +### Modify the barebones clock to make it your own + +Does time have to be linear? How do you measure a year? [In daylights? In midnights? In cups of coffee?](https://www.youtube.com/watch?v=wsj15wPpjLY) + +Can you make time interactive? You can look in `screen_test.py` for examples for how to use the buttons. +Yes, I used the buttons to display a screen with the graph key (including the title, the axis and the 'X' marker) + +Please sketch/diagram your clock idea. (Try using a [Verplank digram](http://www.billverplank.com/IxDSketchBook.pdf)! + +**We strongly discourage and will reject the results of literal digital or analog clock display.** +![GraphClockSketch](https://github.com/user-attachments/assets/7e448d70-1f03-4f24-806c-cbedb909ee2b) + + +\*\*\***A copy of your code should be in your Lab 2 Github repo.**\*\*\* + +See screen_clock.py or pdf below: +[Graph Clock Code.pdf](https://github.com/user-attachments/files/22375729/Graph.Clock.Code.pdf) + + +## Assignment that was formerly Part F. +## Make a short video of your modified barebones PiClock + +\*\*\***Take a video of your PiClock.**\*\*\* + + +https://github.com/user-attachments/assets/440c2191-61bc-42a8-a7d8-fcccd3ebba89 + + + + +After you edit and work on the scripts for Lab 2, the files should be upload back to your own GitHub repo! You can push to your personal github repo by adding the files here, commiting and pushing. + +``` +(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ git add . 
+(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ git commit -m 'your commit message here' +(venv) pi@raspberrypi:~/Interactive-Lab-Hub/Lab 2 $ git push +``` + +After that, Git will ask you to login to your GitHub account to push the updates online, you will be asked to provide your GitHub user name and password. Remember to use the "Personal Access Tokens" you set up in Part A as the password instead of your account one! Go on your GitHub repo with your laptop, you should be able to see the updated files from your Pi! + + +[Update your Lab Hub](pull_updates/README.md) to get the latest content and requirements for Part 2. + +Modify the code from last week's lab to make a new visual interface for your new clock. You may [extend the Pi](Extending%20the%20Pi.md) by adding sensors or buttons, but this is not required. + +As always, make sure you document contributions and ideas from others explicitly in your writeup. + +You are permitted (but not required) to work in groups and share a turn in; you are expected to make equal contribution on any group work you do, and N people's group project should look like N times the work of a single person's lab. What each person did should be explicitly documented. Make sure the page for the group turn in is linked to your Interactive Lab Hub page. + + diff --git a/Lab 2/^C b/Lab 2/^C new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Lab 2/button_device.py b/Lab 2/button_device.py new file mode 100644 index 0000000000..2994ed2585 --- /dev/null +++ b/Lab 2/button_device.py @@ -0,0 +1,58 @@ +import busio +import board +import time +from adafruit_bus_device.i2c_device import I2CDevice +from struct import pack, unpack + +DEVICE_ADDRESS = 0x6f # device address of our button +STATUS = 0x03 # reguster for button status +AVAILIBLE = 0x1 +BEEN_CLICKED = 0x2 +IS_PRESSED = 0x4 + + +# The follow is for I2C communications +i2c = busio.I2C(board.SCL, board.SDA) +device = I2CDevice(i2c, DEVICE_ADDRESS) + +def write_register(dev, register, value, n_bytes=1): + # Write a wregister number and value + buf = bytearray(1 + n_bytes) + buf[0] = register + buf[1:] = value.to_bytes(n_bytes, 'little') + with dev: + dev.write(buf) + +def read_register(dev, register, n_bytes=1): + # write a register number then read back the value + reg = register.to_bytes(1, 'little') + buf = bytearray(n_bytes) + with dev: + dev.write_then_readinto(reg, buf) + return int.from_bytes(buf, 'little') + +# clear out LED lighting settings. For more info https://cdn.sparkfun.com/assets/learn_tutorials/1/1/0/8/Qwiic_Button_I2C_Register_Map.pdf +write_register(device, 0x1A, 1) +write_register(device, 0x1B, 0, 2) +write_register(device, 0x19, 0) + + +while True: + try: + # get the button status + btn_status = read_register(device, STATUS) + print(f"AVAILIBLE: {(btn_status&AVAILIBLE != 0)} BEEN_CLICKED: {(btn_status&BEEN_CLICKED != 0)} IS_PRESSED: {(btn_status&IS_PRESSED != 0)}") + # if pressed light LED + if (btn_status&IS_PRESSED) !=0: + write_register(device, 0x19, 255) + # otherwise turn it off + else: + write_register(device, 0x19, 0) + # don't slam the i2c bus + time.sleep(0.1) + + except KeyboardInterrupt: + # on control-c do...something? try commenting this out and running again? 
What might this do + write_register(device, STATUS, 0) + break + diff --git a/Lab 2/button_registers.py b/Lab 2/button_registers.py new file mode 100644 index 0000000000..1673388039 --- /dev/null +++ b/Lab 2/button_registers.py @@ -0,0 +1,35 @@ + +import board +import busio + +# modified from https://www.digikey.com/en/maker/projects/circuitpython-basics-i2c-and-spi/9799e0554de14af3850975dfb0174ae3 + +# Try to create an I2C device +i2c = busio.I2C(board.SCL, board.SDA) +print("I2C ok!") + +REGISTERS = (0, 32) # Range of registers to read, from the first up to (but + # not including!) the second value. + +REGISTER_SIZE = 2 # Number of bytes to read from each register. + +while not i2c.try_lock(): + pass +# Find the first I2C device available. +devices = i2c.scan() +while len(devices) < 1: + devices = i2c.scan() +device = devices[0] +print(f"Found device with address: {hex(device)}") + +# Scan all the registers and read their byte values. +result = bytearray(REGISTER_SIZE) +for register in range(*REGISTERS): + try: + i2c.writeto(device, bytes([register])) + i2c.readfrom_into(device, result) + print(f"Address {hex(register)}: {[hex(x) for x in result]}") + except OSError: + continue # Ignore registers that don't exist + +i2c.unlock() \ No newline at end of file diff --git a/Lab 2/cli_clock.py b/Lab 2/cli_clock.py new file mode 100644 index 0000000000..1d8944f419 --- /dev/null +++ b/Lab 2/cli_clock.py @@ -0,0 +1,5 @@ +from time import strftime, sleep +while True: + print (strftime("%m/%d/%Y %H:%M:%S"), end="", flush=True) + print("\r", end="", flush=True) + sleep(1) diff --git a/Lab 2/demo_pic/cornell_register.png b/Lab 2/demo_pic/cornell_register.png new file mode 100644 index 0000000000..890cba27ae Binary files /dev/null and b/Lab 2/demo_pic/cornell_register.png differ diff --git a/Lab 2/demo_pic/pi_wifi.png b/Lab 2/demo_pic/pi_wifi.png new file mode 100644 index 0000000000..dc2369f12e Binary files /dev/null and b/Lab 2/demo_pic/pi_wifi.png differ diff --git a/Lab 2/demo_pic/pi_withscreen.png b/Lab 2/demo_pic/pi_withscreen.png new file mode 100644 index 0000000000..61e76e5fa2 Binary files /dev/null and b/Lab 2/demo_pic/pi_withscreen.png differ diff --git a/Lab 2/demo_pic/ping.png b/Lab 2/demo_pic/ping.png new file mode 100644 index 0000000000..ffaf19549a Binary files /dev/null and b/Lab 2/demo_pic/ping.png differ diff --git a/Lab 2/demo_pic/ssh.png b/Lab 2/demo_pic/ssh.png new file mode 100644 index 0000000000..372902591e Binary files /dev/null and b/Lab 2/demo_pic/ssh.png differ diff --git a/Lab 2/demo_pic/ssh1.png b/Lab 2/demo_pic/ssh1.png new file mode 100644 index 0000000000..702bfe475c Binary files /dev/null and b/Lab 2/demo_pic/ssh1.png differ diff --git a/Lab 2/image.py b/Lab 2/image.py new file mode 100644 index 0000000000..0f13c01a3e --- /dev/null +++ b/Lab 2/image.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries +# SPDX-License-Identifier: MIT + +""" +Be sure to check the learn guides for more usage information. + +This example is for use on (Linux) computers that are using CPython with +Adafruit Blinka to support CircuitPython libraries. CircuitPython does +not support PIL/pillow (python imaging library)! 
+ +Author(s): Melissa LeBlanc-Williams for Adafruit Industries +""" + +import digitalio +import board +from PIL import Image, ImageDraw +import adafruit_rgb_display.ili9341 as ili9341 +import adafruit_rgb_display.st7789 as st7789 # pylint: disable=unused-import +import adafruit_rgb_display.hx8357 as hx8357 # pylint: disable=unused-import +import adafruit_rgb_display.st7735 as st7735 # pylint: disable=unused-import +import adafruit_rgb_display.ssd1351 as ssd1351 # pylint: disable=unused-import +import adafruit_rgb_display.ssd1331 as ssd1331 # pylint: disable=unused-import + +# Configuration for CS and DC pins (these are PiTFT defaults): +cs_pin = digitalio.DigitalInOut(board.D5) +dc_pin = digitalio.DigitalInOut(board.D25) +reset_pin = digitalio.DigitalInOut(board.D24) + +# Config for display baudrate (default max is 24mhz): +BAUDRATE = 24000000 + +# Setup SPI bus using hardware SPI: +spi = board.SPI() + +# pylint: disable=line-too-long +# Create the display: +# disp = st7789.ST7789(spi, rotation=90, # 2.0" ST7789 +# disp = st7789.ST7789(spi, height=240, y_offset=80, rotation=180, # 1.3", 1.54" ST7789 +# disp = st7789.ST7789(spi, rotation=90, width=135, height=240, x_offset=53, y_offset=40, # 1.14" ST7789 +# disp = hx8357.HX8357(spi, rotation=180, # 3.5" HX8357 +# disp = st7735.ST7735R(spi, rotation=90, # 1.8" ST7735R +# disp = st7735.ST7735R(spi, rotation=270, height=128, x_offset=2, y_offset=3, # 1.44" ST7735R +# disp = st7735.ST7735R(spi, rotation=90, bgr=True, # 0.96" MiniTFT ST7735R +# disp = ssd1351.SSD1351(spi, rotation=180, # 1.5" SSD1351 +# disp = ssd1351.SSD1351(spi, height=96, y_offset=32, rotation=180, # 1.27" SSD1351 +# disp = ssd1331.SSD1331(spi, rotation=180, # 0.96" SSD1331 +disp = st7789.ST7789( + spi, + cs=cs_pin, + dc=dc_pin, + rst=reset_pin, + baudrate=BAUDRATE, + width=135, + height=240, + x_offset=53, + y_offset=40, +) +# pylint: enable=line-too-long + +# Create blank image for drawing. +# Make sure to create image with mode 'RGB' for full color. +if disp.rotation % 180 == 90: + height = disp.width # we swap height/width to rotate it to landscape! + width = disp.height +else: + width = disp.width # we swap height/width to rotate it to landscape! + height = disp.height +image = Image.new("RGB", (width, height)) + +# Get drawing object to draw on image. +draw = ImageDraw.Draw(image) + +# Draw a black filled box to clear the image. +draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0)) +disp.image(image) + +image = Image.open("red.jpg") +backlight = digitalio.DigitalInOut(board.D22) +backlight.switch_to_output() +backlight.value = True + + +# Scale the image to the smaller screen dimension +image_ratio = image.width / image.height +screen_ratio = width / height +if screen_ratio < image_ratio: + scaled_width = image.width * height // image.height + scaled_height = height +else: + scaled_width = width + scaled_height = image.height * width // image.width +image = image.resize((scaled_width, scaled_height), Image.BICUBIC) + +# Crop and center the image +x = scaled_width // 2 - width // 2 +y = scaled_height // 2 - height // 2 +image = image.crop((x, y, x + width, y + height)) + +# Display image. 
+disp.image(image) + diff --git a/Lab 2/library_example.py b/Lab 2/library_example.py new file mode 100644 index 0000000000..0dec960437 --- /dev/null +++ b/Lab 2/library_example.py @@ -0,0 +1,88 @@ +# The MIT License (MIT) +# +# Copyright (c) 2020 Gregory M Paris +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +""" +`i2c_button_simpletest` +================================================================================ + +Demonstrate CircuitPython I2C Button (Sparkfun Qwiic Button/Switch/Arcade) + + +* Author(s): Gregory M Paris + +modified by ilan mandel 2021 +""" + +# imports +import time +from random import randint +import board +import busio +from i2c_button import I2C_Button + +# initialize I2C +i2c = busio.I2C(board.SCL, board.SDA) + +# scan the I2C bus for devices +while not i2c.try_lock(): + pass +devices = i2c.scan() +i2c.unlock() +print('I2C devices found:', [hex(n) for n in devices]) +default_addr = 0x6f +if default_addr not in devices: + print('warning: no device at the default button address', default_addr) + +# initialize the button +button = I2C_Button(i2c) + +# print some stuff +print('firmware version', button.version) +print('interrupts', button.interrupts) +print('debounce ms', button.debounce_ms) + +# demonstrate writing to registers +button.led_bright = randint(0, 255) +button.led_gran = randint(0, 1) +button.led_cycle_ms = randint(250, 2000) +button.led_off_ms = randint(100, 500) + +# demonstrate reading those registers +print('LED brightness', button.led_bright) +print('LED granularity', button.led_gran) +print('LED cycle ms', button.led_cycle_ms) +print('LED off ms', button.led_off_ms) + +# demonstrate button behavior +while True: + try: + button.clear() # status must be cleared manually + time.sleep(1) + print('status', button.status) + print('last click ms', button.last_click_ms) + print('last press ms', button.last_press_ms) + except KeyboardInterrupt: + button.clear() + button.led_bright = 0 + button.led_gran = 1 + button.led_cycle_ms = 0 + button.led_off_ms = 100 + break diff --git a/Lab 2/partslist.md b/Lab 2/partslist.md new file mode 100644 index 0000000000..92378296cf --- /dev/null +++ b/Lab 2/partslist.md @@ -0,0 +1,57 @@ +# Parts list inventory + +## Parts in the kit by Wednesday class Sep. 
9th + +1 x [Raspberry Pi 5 Model B/8GB](https://www.raspberrypi.com/products/raspberry-pi-5/) + +1 x [Rasberry Pi Power Supply](https://www.raspberrypi.com/products/27w-power-supply/) + +1 x [64 GB MicroSD Card](https://www.raspberrypi.com/products/sd-cards/) + +1 x [USB Type C microSD Card Reader/Writer](https://www.adafruit.com/product/5212) + +1 x [Adafruit Mini PiTFT](https://www.adafruit.com/product/4393) + +1 x [Adafruit I2C Stemma QT Rotary Encoder Breakout with Encoder - STEMMA QT / Qwiic](https://www.adafruit.com/product/5880) + +1 x [Adafruit APDS9960 QT Proximity, Light, RBG, Gesture Sensor](https://www.adafruit.com/product/3595) + +1 x [Adafruit 6-DoF Accel + Gyro IMU LSM6DS3TR-C](https://www.adafruit.com/product/4503) + +1 x [Adafruit PCF8574 I2C GPIO Expander Breakout - STEMMA QT / Qwiic](https://www.adafruit.com/product/5545) + +1 x [Adafruit MPR121 12-Key Capacitive Touch Sensor Gator Breakout - STEMMA QT / Qwiic](https://www.adafruit.com/product/4830) + +1 x [Breadboard](https://www.adafruit.com/product/4539) + +1 x [Copper Foil Tape](https://www.amazon.com/Conductive-Shielding-Repellent-Electrical-Grounding/dp/B0741ZRP4W/ref=sr_1_5?dchild=1&keywords=conductive+copper+tape&qid=1628142003&sr=8-5) + +1 x [Alligator Clips](https://www.amazon.com/WGGE-WG-026-Pieces-Colors-Alligator/dp/B06ZXSCLDH/ref=sr_1_3) + +1 x [9G Servo](https://www.digikey.com/en/products/detail/dfrobot/SER0048) + +1 x [SparkFun Servo pHAT for Raspberry Pi](https://www.sparkfun.com/sparkfun-servo-phat-for-raspberry-pi.html) + +1 x [Micro HDMI Cable - 3ft](https://www.sparkfun.com/micro-hdmi-cable-3ft.html) + +1 x [SparkFun Qwiic SHIM](https://www.sparkfun.com/products/15794) + +1 x [SparkFun Qwiic Button - Red LED](https://www.sparkfun.com/products/15932) + +1 x [SparkFun Qwiic Button - Green LED](https://www.sparkfun.com/products/16842) + +1 x [SparkFun Qwiic Cable Kit](https://www.sparkfun.com/sparkfun-qwiic-cable-kit.html) + +1 x Bluetooth wireless speaker w/ microphone + + + + + + +## On hold +\*\*\***We will distrubute these parts for the future labs**\*\*\* + +1 x [Raspberry Pi Active Cooler](https://www.raspberrypi.com/products/active-cooler/) + +1 x [C270 HD Webcam](https://www.logitech.com/en-us/shop/p/c270-hd-webcam.960-000694) diff --git a/Lab 2/prep.md b/Lab 2/prep.md new file mode 100644 index 0000000000..5c94e7559c --- /dev/null +++ b/Lab 2/prep.md @@ -0,0 +1,219 @@ +# Prep your Pi + + +### To prepare your Pi, you will need: + +- Raspberry Pi 5 +- Raspberry Pi 5 Power Supply +- SD card + Reader +- [Adafruit MiniPiTFT](https://www.adafruit.com/product/4393) + + +### Burn your Pi image to your SD card +#### On your computer +- Download the [Raspberry Pi Imager](https://www.raspberrypi.org/software/) +- Download our copy of Raspbian at [this cornell canvas link](https://canvas.cornell.edu/courses/80789/discussion_topics/936162). +Download and use the ``rpi5-2025-09-08.img.gz`` file directly in the Raspberry Pi Imager (do not unzip). + +- If using windows: [Windows 11 SSH Client](https://docs.microsoft.com/en-us/windows/terminal/tutorials/ssh), [PuTTY](https://www.putty.org/) or [VS Code SSH](https://code.visualstudio.com/learn/develop-cloud/ssh-lab-machines). + +### Setting up your OS for the Pi +1. Plug the SD card into your computer using the card reader + +2. Go download and install the [Raspberry Pi Imager](https://www.raspberrypi.org/software/) on your laptop, download the the customed image file we made for the class. 
Open the Raspberry Pi Imager and choose the downloaded image file from "Choose OS" and the SD card from "Choose SD card". + +choose os + +3. Click the gear icon on the bottom right to open Advanced Settings. In here, you need to make two changes: +- change the "hostname" to something unique +- ~set the password for user "pi" to something unique to you that you can remember~ Albert says, change the password after you ssh in. +- do not change any of the other settings (username pi and network should stay as they are) + +4. Eject or unmount the microSD card reader, and then remove the SD card from the reader and reinsert it into SD card slot on the Pi: it is located on the bottom (silver rectangle on the right). + +Pi bottom side + +5. Take and connect the Adafruit MiniPiTFT to your pi with the configuration shown below, the MiniPiTFT should be on the top left corner of your Pi. + +MiniPiTFTonPi + +6. Boot the Pi by connecting it to a power source with USB-C connector. + +### Setting up your Pi to run in headless mode + +#### Connecting to your Pi remotely + +Unlike your laptop, the Pi doesn't come with its own keyboard or mouse. While you could plug in a monitor, keyboard, and mouse we will be connecting to your Pi over [SSH](https://en.wikipedia.org/wiki/Secure_Shell). You can do this in [Mac Terminal](https://blog.teamtreehouse.com/introduction-to-the-mac-os-x-command-line) or [Windows 10 SSH Client](https://docs.microsoft.com/en-us/windows/terminal/tutorials/ssh). + +*Note: This set up assumes you boot your raspberry pi the first time when on campus or in The House. If you have a screen, mouse and keyboard you can edit the /etc/wpa_supplicant/wpa_supplicant.conf on the pi to make it connect to your home network already now.* + + +1. When you boot up your Pi, the MiniPiTFT should have the following information shown: + + ```` + IP: xxx.xxx.xxx.xxx + NET: [YourWifiNetwork] + MAC: xx:xx:xx:xx:xx:xx + ```` + + The IP address is what you will need to SSH your Pi later through the same network. The media access control address (MAC address) is a unique identifier assigned to a network interface controller, you will need it later for registering the device if you are using Cornell network (e.g. RedRover). The NET shows which WiFi network your Pi is connected to. + + For MAC address: If you are planning to use Cornell network (e.g. RedRover and eduroam), you will have to register the device (your Pi) to the Cornell System to get it online. Please follow the instructions [here](https://it.cornell.edu/wifi/register-device-doesnt-have-browser) from Cornell. Register using the MAC address from your Pi's screen. If you are using the House network, you will need to register the device (your Pi) through [whitesky](https://myaccount.wscmdu.com/myaccount/devices). You might need to wait for a few minutes for your Pi to actually get online after registering it. + + +MiniPiTFTonPi + +register for your mac address + +3. Verify your Pi is online. In the terminal of your laptop, type `ping ` and press enter. 
If your Pi is online, you should get similar messages as below (with different IP address): + + ```shell + PING 10.56.129.178 (10.56.129.178): 56 data bytes + 64 bytes from 10.56.129.178: icmp_seq=0 tt1=62 time=11.911 ms + 64 bytes from + 10.56.129.178: icmp_seq=1 ttl=62 time=8.179 ms + 64 bytes + from + 10.56.129.178: iсmp_seq=2 ttl=62 time=11.489 ms + 64 bytes + • from + 10.56.129.178: iсmp_seq=3 ttl=62 time=11.932 ms + ``` + + You can use `control-C` to interrupt and exit the ping (press the `control` key, and while holding it down, also press the `C` key, then let go of both together--this looks like `^C` in the terminal). + +4. Once your Pi is online, you can go ahead and SSH into the Pi. In the terminal of your laptop, type in the command + + ``` + $ ssh pi@ + ``` + + When you first log in it, the terminal will show you a "fingerprint" and ask you whether you want to continue connecting. Type `yes` and press enter. + + + ```shell + The authenticity of host '10.56.129.178 (10.56.129.178) ' can't be established. + ED25519 key fingerprint is SHA256:uRnRAlBikqynXuZ8vc/kVSR8ohLFawA0nn+3Er7TXm8. + This key is not known by any other names. + Are you sure you want to continue connecting (yes/no/[fingerprint])? yes + ``` + + + + If you set your password in the Advanced Settings during imaging, enter that password. If you didn't, the initial setting of your Pi's password is `student@tech`, type it and press enter. Note: the terminal will not show what you type for security so do not worry about it and just make sure you type the correct password. After that, you should see something similar to this: + + + ``` + pi@10.56.129.178's password: + Linux raspberrypi 6.12.25+rpt-rpi-2712 #1 SMP PREEMPT Debian 1:6.12.25-1+rpt1 (2025-04-30) aarch64 + + The programs included with the Debian GNU/Linux system are free software; + the exact distribution terms for each program are described in the + individual files in /usr/share/doc/*/copyright. + + Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent + permitted by applicable law. + Last login: Wed Sep 10 13:10:52 2025 from 128.84.84.249 + pi@raspberrypi:~ $ + + ``` + + + This means you are signed in and your terminal is now connected directly to the 'terminal' on your Pi, via `ssh`. You can tell this by looking at the user and hostname at the beginning of each line, which should now look like: + + ```shell + pi@raspberry ~ $ + ``` + + +### If you want to change the password of your Pi + +Write it down somewhere because we do not know how to recover lost passwords on the Pi. In the terminal on your Pi, type `sudo raspi-config` and press enter, you should be able to see the manual of your Pi: + +Pi configuration + +Choose '1. System Options' and 'S3 Password', they terminal will then ask you to enter your new password. Again, the terminal will not show what you type for security so do not worry about it and just make sure you type the correct new password twice. After you change the password successfully, you will have to use the new password next time you SSH to your Pi. + +### Refresh your knowledge of command line interfaces: + +The command line/terminal is a powerful way to interact with your computer without using a Graphical User Interface (GUI). When you SSH onto your Pi, you have a prompt you can enter commands. 
In your terminal there is a shell, there are many shells but for this class we will use one of the most common **bash** + + ``` + pi@raspberrypi:~ $ echo $SHELL + /bin/bash + pi@raspberrypi:~ $ + ``` +In the code above we've typed `echo $SHELL`. The `echo` tells it to print something to the screen. You could try typing `echo 'hello'` to see how that works for strings. The `$` at the front of `$SHELL` tells bash we are referring to a variable. In this case it is a variable the OS is using to store the shell program. In a folder `/bin` is a program called bash that we are currently using. The up arrow with show the most recent command. + + + +#### Navigation in the command line + +There are many commands you can use in the command line, they can take a variety of options that change how they are used. You can look these up online to learn more. Many commands have a manual page with documentation that you can see directly in the terminal by typing `man [command]`. For example: + + ```shell + pi@raspberrypi:~ $ man echo + ECHO(1) User Commands ECHO(1) + + NAME + echo - display a line of text + SYNOPSIS + echo [SHORT-OPTION]... [STRING]... + echo LONG-OPTION + DESCRIPTION + Echo the STRING(s) to standard output. + -n do not output the trailing newline + -e enable interpretation of backslash escapes + -E disable interpretation of backslash escapes (default) + --help display this help and exit + --version + Manual page echo(1) line 1 (press h for help or q to quit) + ``` + + +These are some useful commands. Read the manual pages for advanced usage. + +* `pwd` - print working directory, tells us where on the computer we are +* `ls` - list the things in the current directory. +* `cd` - change directory. This lets you move to another folder on your machine. +* `mkdir` - make directory. You can create directories with this command +* `cp` - copy a file. You can copy from one place to any other place +* `mv` - move a file, also used to rename a file +* `rm` - delete a file. To delete a folder you need the recursive flag `rm -r [folder]` +* `cat` - view a file +* `nano` - this is a text editor (there are many) that will let you edit files in terminal. + +There is plenty more to learn about using the terminal to navigate a computer but this should give a good start for getting around the raspberry pi. + + +### Using VNC to see your Pi desktop +Another convenient way to remotely connect to your Pi is using VNC (Virtual Network Computing), it essentially is remote login. The easiest client to use is [VNC Connect](https://www.realvnc.com/en/connect/download/viewer/). Download and install it. Once that's done type the IP address of your Pi in the text-box at the top. +![](images/VNC1.png) + +After that a login window should appear, use your normal logins (originally: Account=pi, Password=raspberry). +![](images/VNC2.png) + +You might want to change a few settings to improve the VNC experience such as changing the display resolution. +To change the resolution, run the command sudo raspi-config, navigate to Display Options > VNC Resolution, and choose an option. +See here for more troubleshooting [realvnc.com Pi Setup](https://help.realvnc.com/hc/en-us/articles/360002249917-VNC-Connect-and-Raspberry-Pi). + + +At that point the normal RPi desktop should appear and you can start and stop programs from here. + +### Setting up WendyTA - Your AI Teaching Assistant + +For this course, we have **WendyTA**, an AI Teaching Assistant that can help you with coding, debugging, brainstorming, and learning. 
WendyTA is automatically activated through GitHub Copilot Chat when working in this repository. + +**📖 Learn more about WendyTA**: [WendyTA Documentation](https://github.com/IRL-CT/Interactive-Lab-Hub/tree/Fall2025/WendyTA) + +#### Recommended Setup Options: + +1. **VS Code Server on your laptop** (Recommended): Use VS Code's Remote SSH extension to connect to your Pi and code directly with WendyTA available. + +2. **VNC + VS Code on Pi**: Use VNC to access the Pi desktop and install VS Code there with GitHub Copilot extension. + +**Setup Instructions**: [WendyTA Copilot Setup Guide](https://github.com/IRL-CT/Interactive-Lab-Hub/blob/Fall2025/WendyTA/setup/copilot-setup.md) + +✨ **Note**: WendyTA works through both SSH/VS Code Server and VNC connections, so choose the method that works best for your setup! + + diff --git a/Lab 2/proximity.py b/Lab 2/proximity.py new file mode 100644 index 0000000000..7de0ffa4cf --- /dev/null +++ b/Lab 2/proximity.py @@ -0,0 +1,13 @@ +import board +import busio +import adafruit_apds9960.apds9960 +import time +i2c = busio.I2C(board.SCL, board.SDA) +sensor = adafruit_apds9960.apds9960.APDS9960(i2c) + +sensor.enable_proximity = True + +while True: + prox = sensor.proximity + print(prox) + time.sleep(0.2) \ No newline at end of file diff --git a/Lab 2/pull_updates/README.md b/Lab 2/pull_updates/README.md index b1b6c47a06..3aaa08f78a 100644 --- a/Lab 2/pull_updates/README.md +++ b/Lab 2/pull_updates/README.md @@ -45,7 +45,37 @@ If you see merge conflicts: 2. Click **"Resolve conflicts"** on GitHub's web interface 3. Or pull the changes locally and resolve conflicts in your editor -## Method 2: Command Line Approach +## Method 2: Using Pull Requests Within Your Fork + +**Alternative approach**: Create a pull request **within your own repository** to pull updates from the course repo. + +### Step-by-Step Process: +1. **Go to your forked repository** on GitHub (`your-username/Interactive-Lab-Hub`) +2. **Click on "Pull requests"** tab +3. **Click "New pull request"** button +4. **Set the repositories correctly**: + - **Base repository**: `your-username/Interactive-Lab-Hub` (your fork) + - **Head repository**: `IRL-CT/Interactive-Lab-Hub` (the course repo) +5. **If needed**: Click the blue **"compare across forks"** link to see cross-fork options + +![Compare Across Forks](pull_into_own_repo_request.png) + +1. **Make sure branches match**: Usually both should be `Fall2025` (the current semester) +2. **Click "Create pull request"** +3. **Add a title**: e.g., "Pull course updates - Lab 2" +4. **Click "Create pull request"** again +5. **Click "Merge pull request"** to complete the update +6. 
**Click "Confirm merge"** + +### When to Use This Method: +- When the "Sync fork" button isn't available +- When you prefer more control over the merge process +- When you want to review changes before merging +- When working with the traditional GitHub workflow + +**Reference**: This follows the process described in [the original course documentation](https://github.com/IRL-CT/Developing-and-Designing-Interactive-Devices/blob/2023Fall/readings/Submitting%20Labs.md) + +## Method 3: Command Line Approach ### Step 1: Add Upstream Remote (One-time setup) ```bash diff --git a/Lab 2/pull_updates/pull_into_own_repo_request.png b/Lab 2/pull_updates/pull_into_own_repo_request.png new file mode 100644 index 0000000000..157ce337e8 Binary files /dev/null and b/Lab 2/pull_updates/pull_into_own_repo_request.png differ diff --git a/Lab 2/red.jpg b/Lab 2/red.jpg new file mode 100644 index 0000000000..53b66834c3 Binary files /dev/null and b/Lab 2/red.jpg differ diff --git a/Lab 2/requirements.txt b/Lab 2/requirements.txt new file mode 100644 index 0000000000..0ff3f6d3d6 --- /dev/null +++ b/Lab 2/requirements.txt @@ -0,0 +1,20 @@ +Adafruit-Blinka==8.64.0 +adafruit-circuitpython-apds9960==3.1.16 +adafruit-circuitpython-busdevice==5.2.13 +adafruit-circuitpython-connectionmanager==3.1.5 +adafruit-circuitpython-register==1.10.4 +adafruit-circuitpython-requests==4.1.13 +adafruit-circuitpython-rgb-display==3.14.1 +adafruit-circuitpython-typing==1.12.1 +Adafruit-PlatformDetect==3.82.0 +Adafruit-PureIO==1.1.11 +binho-host-adapter==0.1.6 +lgpio==0.2.2.0 +pillow==11.3.0 +pip-chill==1.0.3 +pyftdi==0.57.1 +pyserial==3.5 +pyusb==1.3.1 +sysv-ipc==1.1.0 +typing_extensions==4.15.0 +webcolors==24.11.1 diff --git a/Lab 2/screen_boot_script.py b/Lab 2/screen_boot_script.py new file mode 100644 index 0000000000..224ecceec6 --- /dev/null +++ b/Lab 2/screen_boot_script.py @@ -0,0 +1,134 @@ +# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries +# SPDX-License-Identifier: MIT + +# -*- coding: utf-8 -*- + +import time +import subprocess +import digitalio +import board +from PIL import Image, ImageDraw, ImageFont +from adafruit_rgb_display import st7789 + + +# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4): +cs_pin = digitalio.DigitalInOut(board.D5) +dc_pin = digitalio.DigitalInOut(board.D25) +reset_pin = None + +# Config for display baudrate (default max is 24mhz): +BAUDRATE = 64000000 + +# Setup SPI bus using hardware SPI: +spi = board.SPI() + +# Create the ST7789 display: +disp = st7789.ST7789( + spi, + cs=cs_pin, + dc=dc_pin, + rst=reset_pin, + baudrate=BAUDRATE, + width=135, + height=240, + x_offset=53, + y_offset=40, +) + +# Create blank image for drawing. +# Make sure to create image with mode 'RGB' for full color. +height = disp.width # we swap height/width to rotate it to landscape! +width = disp.height +image = Image.new("RGB", (width, height)) +rotation = 90 + +# Get drawing object to draw on image. +draw = ImageDraw.Draw(image) + +# Draw a black filled box to clear the image. +draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0)) +disp.image(image, rotation) +# Draw some shapes. +# First define some constants to allow easy resizing of shapes. +padding = -2 +top = padding +bottom = height - padding +# Move left to right keeping track of the current x position for drawing shapes. +x = 0 + + +# Alternatively load a TTF font. Make sure the .ttf font file is in the +# same directory as the python script! 
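# Note: this script loads DejaVuSans from the system font directory (/usr/share/fonts/truetype/dejavu/), which ships with Raspberry Pi OS, so no local .ttf file is needed here.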
+# Some other nice fonts to try: http://www.dafont.com/bitmap.php +font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24) + +# Turn on the backlight +backlight = digitalio.DigitalInOut(board.D22) +backlight.switch_to_output() +backlight.value = True + +# Button configuration +buttonA = digitalio.DigitalInOut(board.D23) +buttonB = digitalio.DigitalInOut(board.D24) +buttonA.switch_to_input() +buttonB.switch_to_input() + +mac_scroll_position = 0 +cpu_mem_disk_scroll_position = 0 + +while True: + # Draw a black filled box to clear the image. + draw.rectangle((0, 0, width, height), outline=0, fill=0) + + # Check button presses + if not buttonA.value: # Button A pressed (power off) + draw.text((0, 0), "Shutdown " * 10, font=font, fill="#FF0000") + disp.image(image, rotation) + subprocess.run(['sudo', 'poweroff']) + elif not buttonB.value: # Button B pressed (restart) + draw.text((0, 0), "Reboot " * 10, font=font, fill="#0000FF") + disp.image(image, rotation) + subprocess.run(['sudo', 'reboot']) + + y = top + + # IP Address + cmd = "hostname -I | cut -d' ' -f1" + IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8") + draw.text((x, y), IP, font=font, fill="#FFFFFF") + y += font.size + + # Network Name + try: + cmd = "iwgetid -r" + Network = "Net: " + subprocess.check_output(cmd, shell=True).decode("utf-8").strip() + except subprocess.CalledProcessError: + Network = "Net: Error fetching network name" + + + draw.text((x, y), Network, font=font, fill="#FFFFFF") + y += font.size + + + # MAC Address + MAC = "MAC: " + subprocess.check_output("cat /sys/class/net/wlan0/address", shell=True).decode("utf-8").strip() + draw.text((x - mac_scroll_position, y), MAC, font=font, fill="#FFFFFF") + y += font.size + mac_scroll_position = (mac_scroll_position + 5) % font.size + + # CPU Usage, Memory and Disk Usage + CPU = subprocess.check_output("top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'", shell=True).decode("utf-8") + MemUsage = subprocess.check_output("free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'", shell=True).decode("utf-8") + Disk = subprocess.check_output('df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\'', shell=True).decode("utf-8") + CPUMemDisk = CPU + " | " + MemUsage + " | " + Disk + draw.text((x - cpu_mem_disk_scroll_position, y), CPUMemDisk, font=font, fill="#00FF00") + y += font.size + cpu_mem_disk_scroll_position = (cpu_mem_disk_scroll_position + 5) % font.size + + # CPU Temperature + Temp = subprocess.check_output("cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'", shell=True).decode("utf-8") + draw.text((x, y), Temp, font=font, fill="#FF00FF") + + # Display image. 
+ disp.image(image, rotation) + time.sleep(0.2) # Adjust the speed of the scrolling text \ No newline at end of file diff --git a/Lab 2/screen_clock.py b/Lab 2/screen_clock.py new file mode 100644 index 0000000000..bbb5289a00 --- /dev/null +++ b/Lab 2/screen_clock.py @@ -0,0 +1,204 @@ +import time +import subprocess +import digitalio +import board +from PIL import Image, ImageDraw, ImageFont +import adafruit_rgb_display.st7789 as st7789 +from adafruit_rgb_display.rgb import color565 +import colorsys +import calendar +import math + +# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4): +cs_pin = digitalio.DigitalInOut(board.D5) +dc_pin = digitalio.DigitalInOut(board.D25) +reset_pin = None + +# Config for display baudrate (default max is 24mhz): +BAUDRATE = 64000000 + +# Setup SPI bus using hardware SPI: +spi = board.SPI() + +# Create the ST7789 display: +disp = st7789.ST7789( + spi, + cs=cs_pin, + dc=dc_pin, + rst=reset_pin, + baudrate=BAUDRATE, + width=135, + height=240, + x_offset=53, + y_offset=40, +) + +# Button setup +btnA = digitalio.DigitalInOut(board.D23) +btnB = digitalio.DigitalInOut(board.D24) + +# Set buttons as inputs, with a pull-up resistor to avoid floating inputs +btnA.direction = digitalio.Direction.INPUT +btnB.direction = digitalio.Direction.INPUT +btnA.pull = digitalio.Pull.UP +btnB.pull = digitalio.Pull.UP + +# Create blank image for drawing. +# Make sure to create image with mode 'RGB' for full color. +height = disp.width # we swap height/width to rotate it to landscape! +width = disp.height +image = Image.new("RGB", (width, height)) +rotation = 90 + +# Get drawing object to draw on image. +draw = ImageDraw.Draw(image) + +# Draw a black filled box to clear the image. +draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0)) +disp.image(image, rotation) + +# Font for the X +font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 10) + +# Font for the axis labels +label_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 8) + +# A font for the key and title +key_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 12) + +# Turn on the backlight +backlight = digitalio.DigitalInOut(board.D22) +backlight.switch_to_output() +backlight.value = True + +def draw_axes_clock(): + # Clear screen with white background + draw.rectangle((0, 0, width, height), outline=0, fill=(255, 255, 255)) + + # Borders and padding + border_width = 2 + padding_left = 25 + padding_right = 15 + padding_top = 15 + padding_bottom = 15 + + # Border around the entire display area + draw.rectangle((0, 0, width - 1, height - 1), outline=color565(255, 0, 0), width=border_width) + + # Plotting area (within padding) + plot_left = padding_left + plot_top = padding_top + plot_right = width - padding_right + plot_bottom = height - padding_bottom + + plotting_width = plot_right - plot_left + plotting_height = plot_bottom - plot_top + + # Axis colors and thickness + axis_color = color565(0, 0, 0) # Black + line_thickness = 1 + + # Horizontal axis (minutes) + draw.line((plot_left, plot_bottom, plot_right, plot_bottom), fill=axis_color, width=line_thickness) + + # Vertical axis (hours) + draw.line((plot_left, plot_bottom, plot_left, plot_top), fill=axis_color, width=line_thickness) + + # Markers and labels to the axes + marker_size = 3 + text_offset = 2 + + # Minute markers (every 15 minutes) + for minute_val in range(0, 61, 15): + x_pos = plot_left + int(minute_val / 60 * plotting_width) + + draw.line((x_pos, 
plot_bottom - marker_size, x_pos, plot_bottom + marker_size), fill=axis_color, width=line_thickness) + + text_bbox = draw.textbbox((0,0), str(minute_val), font=label_font) + text_width = text_bbox[2] - text_bbox[0] + + if minute_val == 60: + draw.text((x_pos - text_width, plot_bottom + text_offset), str(minute_val), font=label_font, fill=axis_color) + else: + draw.text((x_pos - text_width/2, plot_bottom + text_offset), str(minute_val), font=label_font, fill=axis_color) + + # Hour markers (every 3 hours) + for hour_val in range(0, 13, 3): + y_pos = plot_bottom - int(hour_val / 12 * plotting_height) + + draw.line((plot_left - marker_size, y_pos, plot_left + marker_size, y_pos), fill=axis_color, width=line_thickness) + + hour_label = str(hour_val) + if hour_val == 12: + hour_label = "12" + elif hour_val == 0: + hour_label = "0" + + text_bbox = draw.textbbox((0,0), hour_label, font=label_font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + draw.text((plot_left - marker_size - text_offset - text_width, y_pos - text_height/2), hour_label, font=label_font, fill=axis_color) + + # Draw the arrowheads + arrow_size = 5 + # Right arrow on X-axis (minutes) + draw.polygon([(plot_right, plot_bottom), + (plot_right - arrow_size, plot_bottom - arrow_size), + (plot_right - arrow_size, plot_bottom + arrow_size)], fill=axis_color) + # Top arrow on Y-axis (hours) + draw.polygon([(plot_left, plot_top), + (plot_left - arrow_size, plot_top + arrow_size), + (plot_left + arrow_size, plot_top + arrow_size)], fill=axis_color) + + # Get the current time + current_time = time.localtime() + current_hour_24 = current_time.tm_hour + current_minute = current_time.tm_min + + # Convert to 12-hour format (1-12) + hour_12 = current_hour_24 % 12 + if hour_12 == 0: + hour_12 = 12 + + # Map minutes (0-59) to X-coordinate + x_plot = plot_left + int(current_minute / 59 * plotting_width) + + # Map hours (1-12) to Y-coordinate + # Y-axis is inverted: 1 at bottom, 12 at top + y_plot = plot_bottom - int((hour_12 - 1) / 11 * plotting_height) + + # Draw 'X' at the calculated position + text_bbox_x = draw.textbbox((0,0), 'X', font=font) + x_text_offset = (text_bbox_x[2] - text_bbox_x[0]) / 2 + y_text_offset = (text_bbox_x[3] - text_bbox_x[1]) / 2 + draw.text((x_plot - x_text_offset, y_plot - y_text_offset), 'X', font=font, fill="#FF0000") + +def draw_key_screen(): + # Clear screen with black background + draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0)) + + # Define text/ key + title = "GRAPH CLOCK" + key_text = "X = Time" + hours_key = "Y-Axis = Hours" + minutes_key = "X-Axis = Minutes" + + # Title (at the top) + draw.text((width/2 - 30, 20), title, font=key_font, fill="#FFFFFF") + + # Key (in the center) + draw.text((width/2 - 30, height/2 - 20), key_text, font=key_font, fill="#FF0000") + draw.text((width/2 - 30, height/2), hours_key, font=key_font, fill="#FFFFFF") + draw.text((width/2 - 30, height/2 + 20), minutes_key, font=key_font, fill="#FFFFFF") + + disp.image(image, rotation) + +while True: + if not btnB.value: # Button B is pressed when its value is False + draw_key_screen() + else: + draw_axes_clock() + disp.image(image, rotation) + + time.sleep(0.1) diff --git a/Lab 2/screen_test.py b/Lab 2/screen_test.py new file mode 100644 index 0000000000..24c977231f --- /dev/null +++ b/Lab 2/screen_test.py @@ -0,0 +1,87 @@ +# rpi5_minipitft_st7789.py +# Works on Raspberry Pi 5 with Adafruit Blinka backend (lgpio) and SPI enabled. 
+# Wiring change: connect the display's CS to GPIO5 (pin 29), not CE0. + +import time +import digitalio +import board + +from adafruit_rgb_display.rgb import color565 +import adafruit_rgb_display.st7789 as st7789 +import webcolors + +# --------------------------- +# SPI + Display configuration +# --------------------------- +# Use a FREE GPIO for CS to avoid conflicts with the SPI driver owning CE0/CE1. +cs_pin = digitalio.DigitalInOut(board.D5) # GPIO5 (PIN 29) <-- wire display CS here +dc_pin = digitalio.DigitalInOut(board.D25) # GPIO25 (PIN 22) +reset_pin = None + +# Safer baudrate for stability; you can try 64_000_000 if your wiring is short/clean. +BAUDRATE = 64000000 + +# Create SPI object on SPI0 (spidev0.* must exist; enable SPI in raspi-config). +spi = board.SPI() + +# For Adafruit mini PiTFT 1.14" (240x135) ST7789 use width=135, height=240, x/y offsets below. +# If you actually have a 240x240 panel, set width=240, height=240 and x_offset=y_offset=0. +display = st7789.ST7789( + spi, + cs=cs_pin, + dc=dc_pin, + rst=reset_pin, + baudrate=BAUDRATE, + width=135, + height=240, + x_offset=53, + y_offset=40, + # rotation=0 # uncomment/change if your screen orientation is off +) + +# --------------------------- +# Backlight + Buttons +# --------------------------- +backlight = digitalio.DigitalInOut(board.D22) # GPIO22 (PIN 15) +backlight.switch_to_output(value=True) + +buttonA = digitalio.DigitalInOut(board.D23) # GPIO23 (PIN 16) +buttonB = digitalio.DigitalInOut(board.D24) # GPIO24 (PIN 18) +# Use internal pull-ups; buttons then read LOW when pressed. +buttonA.switch_to_input(pull=digitalio.Pull.UP) +buttonB.switch_to_input(pull=digitalio.Pull.UP) + +# --------------------------- +# Ask user for a color +# --------------------------- +screenColor = None +while not screenColor: + try: + name = input('Type the name of a color and hit enter: ') + rgb = webcolors.name_to_rgb(name) + screenColor = color565(rgb.red, rgb.green, rgb.blue) + except ValueError: + print("whoops I don't know that one") + +# --------------------------- +# Main loop +# --------------------------- +print("Press A for WHITE, B for your color, both to turn backlight OFF.") +while True: + # Buttons are active-LOW because of pull-ups + a_pressed = (buttonA.value == False) + b_pressed = (buttonB.value == False) + + if a_pressed and b_pressed: + backlight.value = False # turn off backlight + else: + backlight.value = True # turn on backlight + + if b_pressed and not a_pressed: + display.fill(screenColor) # user's color + elif a_pressed and not b_pressed: + display.fill(color565(255, 255, 255)) # white + else: + display.fill(color565(0, 255, 0)) # green + + time.sleep(0.02) # small debounce / CPU break diff --git a/Lab 2/stats.py b/Lab 2/stats.py new file mode 100644 index 0000000000..010cb8410f --- /dev/null +++ b/Lab 2/stats.py @@ -0,0 +1,92 @@ +import time +import subprocess +import digitalio +import board +from PIL import Image, ImageDraw, ImageFont +import adafruit_rgb_display.st7789 as st7789 + + +# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4): +cs_pin = digitalio.DigitalInOut(board.D5) +dc_pin = digitalio.DigitalInOut(board.D25) +reset_pin = None + +# Config for display baudrate (default max is 24mhz): +BAUDRATE = 64000000 + +# Setup SPI bus using hardware SPI: +spi = board.SPI() + +# Create the ST7789 display: +disp = st7789.ST7789( + spi, + cs=cs_pin, + dc=dc_pin, + rst=reset_pin, + baudrate=BAUDRATE, + width=135, + height=240, + x_offset=53, + y_offset=40, +) + +# Create blank image 
for drawing. +# Make sure to create image with mode 'RGB' for full color. +height = disp.width # we swap height/width to rotate it to landscape! +width = disp.height +image = Image.new("RGB", (width, height)) +rotation = 90 + +# Get drawing object to draw on image. +draw = ImageDraw.Draw(image) + +# Draw a black filled box to clear the image. +draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0)) +disp.image(image, rotation) +# Draw some shapes. +# First define some constants to allow easy resizing of shapes. +padding = -2 +top = padding +bottom = height - padding +# Move left to right keeping track of the current x position for drawing shapes. +x = 0 + + +# Alternatively load a TTF font. Make sure the .ttf font file is in the +# same directory as the python script! +# Some other nice fonts to try: http://www.dafont.com/bitmap.php +font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24) + +# Turn on the backlight +backlight = digitalio.DigitalInOut(board.D22) +backlight.switch_to_output() +backlight.value = True + +while True: + # Draw a black filled box to clear the image. + draw.rectangle((0, 0, width, height), outline=0, fill=0) + + # Shell scripts for system monitoring from here: + # https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-USD-usage-and-WTTR-load + cmd = "hostname -I | cut -d' ' -f1" + IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8") + cmd = "curl -s wttr.in/?format=2" + WTTR = subprocess.check_output(cmd, shell=True).decode("utf-8") + cmd = 'curl -s ils.rate.sx/1USD | cut -c1-6' + USD = "$1USD = ₪" + subprocess.check_output(cmd, shell=True).decode("utf-8") + "ILS" + cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'" + Temp = subprocess.check_output(cmd, shell=True).decode("utf-8") + + # Write four lines of text. + y = top + draw.text((x, y), IP, font=font, fill="#FFFFFF") + # y += font.getsize(IP)[1] + y += draw.textbbox((0,0), IP, font=font)[3] + draw.text((x, y), WTTR, font=font, fill="#FFFF00") + y += draw.textbbox((0,0), WTTR, font=font)[3] + draw.text((x, y), USD, font=font, fill="#0000FF") + y += draw.textbbox((0,0), USD, font=font)[3] + + # Display image. 
+ disp.image(image, rotation) + time.sleep(0.1) diff --git a/Lab 3/Deliverables/IMG_2512.mov b/Lab 3/Deliverables/IMG_2512.mov new file mode 100644 index 0000000000..ba2ed8303b Binary files /dev/null and b/Lab 3/Deliverables/IMG_2512.mov differ diff --git a/Lab 3/Deliverables/Ollama Documentation.pdf b/Lab 3/Deliverables/Ollama Documentation.pdf new file mode 100644 index 0000000000..d479bf2307 Binary files /dev/null and b/Lab 3/Deliverables/Ollama Documentation.pdf differ diff --git a/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png b/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png new file mode 100644 index 0000000000..ad4f83b412 Binary files /dev/null and b/Lab 3/Deliverables/Screenshot 2025-09-28 at 00.15.36.png differ diff --git a/Lab 3/Deliverables/WordBot StoryBoard.jpg b/Lab 3/Deliverables/WordBot StoryBoard.jpg new file mode 100644 index 0000000000..6017a1245d Binary files /dev/null and b/Lab 3/Deliverables/WordBot StoryBoard.jpg differ diff --git a/Lab 3/Deliverables/Wordbot Script.pdf b/Lab 3/Deliverables/Wordbot Script.pdf new file mode 100644 index 0000000000..33129c837b Binary files /dev/null and b/Lab 3/Deliverables/Wordbot Script.pdf differ diff --git a/Lab 3/Deliverables/final_voice_assistant.py b/Lab 3/Deliverables/final_voice_assistant.py new file mode 100644 index 0000000000..16b24406a3 --- /dev/null +++ b/Lab 3/Deliverables/final_voice_assistant.py @@ -0,0 +1,93 @@ +# ollama_voice_assistant.py + +import speech_recognition as sr +import requests +import os +import time + +# --- Configuration --- +MICROPHONE_INDEX = 2 +MODEL_NAME = "phi3:mini" +OLLAMA_URL = "http://localhost:11434/api/generate" + +ENERGY_THRESHOLD = 150 + +def speak(text): + """Uses the espeak command line tool for Text-to-Speech with better parameters.""" + + text = text.replace("'", "'\\''") + # -v en+f3: Female English voice | -s 150: Speed 150 WPM | -k 15: Pitch/Inflection + print(f"AI Speaking: {text}") + os.system(f"espeak -v en+f3 -s 150 -k 15 '{text}' 2>/dev/null") + +def transcribe_speech(): + """Listens for user input and converts it to text.""" + r = sr.Recognizer() + try: + with sr.Microphone(device_index=MICROPHONE_INDEX) as source: + r.adjust_for_ambient_noise(source) + r.energy_threshold = ENERGY_THRESHOLD + + print("\nListening... Speak now.") + speak("Ready. Ask me anything.") + + + time.sleep(0.5) + + audio = r.listen(source, timeout=8, phrase_time_limit=15) + + except Exception as e: + print(f"Microphone error: {e}. Check MICROPHONE_INDEX ({MICROPHONE_INDEX}).") + speak("I am having trouble accessing the microphone.") + return None + + try: + print("Transcribing via Google Speech Recognition...") + text = r.recognize_google(audio) + print(f"User Said: {text}") + return text + except sr.UnknownValueError: + print("Could not understand audio.") + speak("I didn't catch that. 
Could you repeat it?") + return None + except sr.RequestError as e: + print(f"Speech recognition service error: {e}") + speak("My transcription service is currently unavailable.") + return None + +def ask_ai(question): + """Sends the question to the local Ollama model.""" + print("Sending request to Ollama...") + try: + # Long timeout (120 seconds) for the RPi's slow processing + response = requests.post( + OLLAMA_URL, + json={"model": MODEL_NAME, "prompt": question, "stream": False}, + timeout=120 + ) + response.raise_for_status() + return response.json().get('response', 'No response received from the model.') + except requests.exceptions.RequestException as e: + print(f"Error communicating with Ollama: {e}") + return "I seem to be having trouble connecting to the AI model on port 11434." + +def main_assistant_loop(): + """Main loop for the voice assistant.""" + speak("Voice assistant is active. Say 'stop' to quit.") + while True: + user_text = transcribe_speech() + + if user_text: + if "stop" in user_text.lower() or "exit" in user_text.lower() or "quit" in user_text.lower(): + speak("Goodbye.") + print("Exiting assistant.") + break + + ai_response = ask_ai(user_text) + + if ai_response: + print(f"AI Response: {ai_response}") + speak(ai_response) + +if __name__ == "__main__": + main_assistant_loop() diff --git a/Lab 3/Deliverables/txt b/Lab 3/Deliverables/txt new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/Lab 3/Deliverables/txt @@ -0,0 +1 @@ + diff --git a/Lab 3/README.md b/Lab 3/README.md new file mode 100644 index 0000000000..5fba1e6484 --- /dev/null +++ b/Lab 3/README.md @@ -0,0 +1,328 @@ +# Chatterboxes +**NAMES OF COLLABORATORS HERE** +[![Watch the video](https://user-images.githubusercontent.com/1128669/135009222-111fe522-e6ba-46ad-b6dc-d1633d21129c.png)](https://www.youtube.com/embed/Q8FWzLMobx0?start=19) + +In this lab, we want you to design interaction with a speech-enabled device--something that listens and talks to you. This device can do anything *but* control lights (since we already did that in Lab 1). First, we want you first to storyboard what you imagine the conversational interaction to be like. Then, you will use wizarding techniques to elicit examples of what people might say, ask, or respond. We then want you to use the examples collected from at least two other people to inform the redesign of the device. + +We will focus on **audio** as the main modality for interaction to start; these general techniques can be extended to **video**, **haptics** or other interactive mechanisms in the second part of the Lab. + +## Prep for Part 1: Get the Latest Content and Pick up Additional Parts + +Please check instructions in [prep.md](prep.md) and complete the setup before class on Wednesday, Sept 23rd. + +### Pick up Web Camera If You Don't Have One + +Students who have not already received a web camera will receive their [Logitech C270 Webcam](https://www.amazon.com/Logitech-Desktop-Widescreen-Calling-Recording/dp/B004FHO5Y6/ref=sr_1_3?crid=W5QN79TK8JM7&dib=eyJ2IjoiMSJ9.FB-davgIQ_ciWNvY6RK4yckjgOCrvOWOGAG4IFaH0fczv-OIDHpR7rVTU8xj1iIbn_Aiowl9xMdeQxceQ6AT0Z8Rr5ZP1RocU6X8QSbkeJ4Zs5TYqa4a3C_cnfhZ7_ViooQU20IWibZqkBroF2Hja2xZXoTqZFI8e5YnF_2C0Bn7vtBGpapOYIGCeQoXqnV81r2HypQNUzFQbGPh7VqjqDbzmUoloFA2-QPLa5lOctA.L5ztl0wO7LqzxrIqDku9f96L9QrzYCMftU_YeTEJpGA&dib_tag=se&keywords=webcam%2Bc270&qid=1758416854&sprefix=webcam%2Bc270%2Caps%2C125&sr=8-3&th=1) and bluetooth speaker on Wednesday at the beginning of lab. 
If you cannot make it to class this week, please contact the TAs to ensure you get these. + +### Get the Latest Content + +As always, pull updates from the class Interactive-Lab-Hub to both your Pi and your own GitHub repo. There are 2 ways you can do so: + +**\[recommended\]**Option 1: On the Pi, `cd` to your `Interactive-Lab-Hub`, pull the updates from upstream (class lab-hub) and push the updates back to your own GitHub repo. You will need the *personal access token* for this. + +``` +pi@ixe00:~$ cd Interactive-Lab-Hub +pi@ixe00:~/Interactive-Lab-Hub $ git pull upstream Fall2025 +pi@ixe00:~/Interactive-Lab-Hub $ git add . +pi@ixe00:~/Interactive-Lab-Hub $ git commit -m "get lab3 updates" +pi@ixe00:~/Interactive-Lab-Hub $ git push +``` + +Option 2: On your your own GitHub repo, [create pull request](https://github.com/FAR-Lab/Developing-and-Designing-Interactive-Devices/blob/2022Fall/readings/Submitting%20Labs.md) to get updates from the class Interactive-Lab-Hub. After you have latest updates online, go on your Pi, `cd` to your `Interactive-Lab-Hub` and use `git pull` to get updates from your own GitHub repo. + +## Part 1. +### Setup + +Activate your virtual environment + +``` +pi@ixe00:~$ cd Interactive-Lab-Hub +pi@ixe00:~/Interactive-Lab-Hub $ cd Lab\ 3 +pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ python3 -m venv .venv +pi@ixe00:~/Interactive-Lab-Hub $ source .venv/bin/activate +(.venv)pi@ixe00:~/Interactive-Lab-Hub $ +``` + +Run the setup script +```(.venv)pi@ixe00:~/Interactive-Lab-Hub $ pip install -r requirements.txt ``` + +Next, run the setup script to install additional text-to-speech dependencies: +``` +(.venv)pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ ./setup.sh +``` + +### Text to Speech + +In this part of lab, we are going to start peeking into the world of audio on your Pi! + +We will be using the microphone and speaker on your webcamera. In the directory is a folder called `speech-scripts` containing several shell scripts. `cd` to the folder and list out all the files by `ls`: + +``` +pi@ixe00:~/speech-scripts $ ls +Download festival_demo.sh GoogleTTS_demo.sh pico2text_demo.sh +espeak_demo.sh flite_demo.sh lookdave.wav +``` + +You can run these shell files `.sh` by typing `./filename`, for example, typing `./espeak_demo.sh` and see what happens. Take some time to look at each script and see how it works. You can see a script by typing `cat filename`. For instance: + +``` +pi@ixe00:~/speech-scripts $ cat festival_demo.sh +#from: https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)#Festival_Text_to_Speech +``` +You can test the commands by running +``` +echo "Just what do you think you're doing, Dave?" | festival --tts +``` + +Now, you might wonder what exactly is a `.sh` file? +Typically, a `.sh` file is a shell script which you can execute in a terminal. The example files we offer here are for you to figure out the ways to play with audio on your Pi! + +You can also play audio files directly with `aplay filename`. Try typing `aplay lookdave.wav`. + +\*\***Write your own shell file to use your favorite of these TTS engines to have your Pi greet you by name.**\*\* +(This shell file should be saved to your own repo for this lab.) 
+See file here (in 'speech-scripts' folder): https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/speech-scripts/my_greeting.sh
+
+---
+Bonus:
+[Piper](https://github.com/rhasspy/piper) is another fast, neural-network-based text-to-speech package for the Raspberry Pi, which can be installed easily through pip:
+```
+pip install piper-tts
+```
+and used from the command line. Running the command below the first time will download the model; subsequent runs will be faster.
+```
+echo 'Welcome to the world of speech synthesis!' | piper \
+  --model en_US-lessac-medium \
+  --output_file welcome.wav
+```
+Check the file that was created by running `aplay welcome.wav`. Many more languages are supported, and audio can be streamed directly to an audio output, rather than into a file, by:
+
+```
+echo 'This sentence is spoken first. This sentence is synthesized while the first sentence is spoken.' | \
+  piper --model en_US-lessac-medium --output-raw | \
+  aplay -r 22050 -f S16_LE -t raw -
+```
+
+### Speech to Text
+
+Next, set up speech to text. We are using a speech recognition engine, [Vosk](https://alphacephei.com/vosk/), which is made by researchers at Carnegie Mellon University. Vosk is amazing because it is an offline speech recognition engine; that is, all the processing for the speech recognition happens onboard the Raspberry Pi.
+
+Make sure you're running in your virtual environment with the dependencies already installed:
+```
+source .venv/bin/activate
+```
+
+Test if Vosk works by transcribing text:
+
+```
+vosk-transcriber -i recorded_mono.wav -o test.txt
+```
+
+You can use Vosk with the microphone by running
+```
+python test_microphone.py -m en
+```
+
+---
+Bonus:
+[Whisper](https://openai.com/index/whisper/) is a neural network–based speech-to-text (STT) model developed and open-sourced by OpenAI. Compared to Vosk, Whisper generally achieves higher accuracy, particularly on noisy audio and diverse accents. It is available in multiple model sizes; for edge devices such as the Raspberry Pi 5 used in this class, the tiny.en model runs with reasonable latency even without a GPU.
+
+By contrast, Vosk is more lightweight and optimized for running efficiently on low-power devices like the Raspberry Pi. The choice between Whisper and Vosk depends on your scenario: if you need higher accuracy and can afford slightly more compute, Whisper is preferable; if your priority is minimal resource usage, Vosk may be a better fit.
+
+In this class, we provide two Whisper options: a quantized 8-bit faster-whisper model for speed, and the standard Whisper model. Try them out and compare the trade-offs.
+
+Make sure you're in the Lab 3 directory with your virtual environment activated:
+```
+cd ~/Interactive-Lab-Hub/Lab\ 3/speech-scripts
+source ../.venv/bin/activate
+```
+
+Then test the Whisper models:
+```
+python whisper_try.py
+```
+and
+
+```
+python faster_whisper_try.py
+```
+\*\***Write your own shell file that verbally asks for a number-based input (such as a phone number, zip code, number of pets, etc.) and records the answer the respondent provides.**\*\*
+See file here (in 'speech-scripts' folder): https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/speech-scripts/numerical_input.sh
+
+### 🤖 NEW: AI-Powered Conversations with Ollama
+
+Want to add intelligent conversation capabilities to your voice projects? **Ollama** lets you run AI models locally on your Raspberry Pi for sophisticated dialogue without requiring internet connectivity!
+ +#### Quick Start with Ollama + +**Installation** (takes ~5 minutes): +```bash +# Install Ollama +curl -fsSL https://ollama.com/install.sh | sh + +# Download recommended model for Pi 5 +ollama pull phi3:mini + +# Install system dependencies for audio (required for pyaudio) +sudo apt-get update +sudo apt-get install -y portaudio19-dev python3-dev + +# Create separate virtual environment for Ollama (due to pyaudio conflicts) +cd ollama/ +python3 -m venv ollama_venv +source ollama_venv/bin/activate + +# Install Python dependencies in separate environment +pip install -r ollama_requirements.txt +``` +#### Ready-to-Use Scripts + +We've created three Ollama integration scripts for different use cases: + +**1. Basic Demo** - Learn how Ollama works: +```bash +python3 ollama_demo.py +``` + +**2. Voice Assistant** - Full speech-to-text + AI + text-to-speech: +```bash +python3 ollama_voice_assistant.py +``` + +**3. Web Interface** - Beautiful web-based chat with voice options: +```bash +python3 ollama_web_app.py +# Then open: http://localhost:5000 +``` + +#### Integration in Your Projects + +Simple example to add AI to any project: +```python +import requests + +def ask_ai(question): + response = requests.post( + "http://localhost:11434/api/generate", + json={"model": "phi3:mini", "prompt": question, "stream": False} + ) + return response.json().get('response', 'No response') + +# Use it anywhere! +answer = ask_ai("How should I greet users?") +``` + +**📖 Complete Setup Guide**: See `OLLAMA_SETUP.md` for detailed instructions, troubleshooting, and advanced usage! + +\*\***Try creating a simple voice interaction that combines speech recognition, Ollama processing, and text-to-speech output. Document what you built and how users responded to it.**\*\* +See script here ('final_voice_assistant.py in ollama folder) : https://github.com/ji227/Jesse-Iriah-s-Lab-Hub/blob/Fall2025/Lab%203/ollama/final_voice_assistant.py +See documentation on the script and response here: https://docs.google.com/document/d/1MC8Soh6y-xnqsH4-R49oLbx3axFsuhnrAuuwzcrkruw/edit?tab=t.0 + + +### Serving Pages + +In Lab 1, we served a webpage with flask. In this lab, you may find it useful to serve a webpage for the controller on a remote device. Here is a simple example of a webserver. + +``` +pi@ixe00:~/Interactive-Lab-Hub/Lab 3 $ python server.py + * Serving Flask app "server" (lazy loading) + * Environment: production + WARNING: This is a development server. Do not use it in a production deployment. + Use a production WSGI server instead. + * Debug mode: on + * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit) + * Restarting with stat + * Debugger is active! + * Debugger PIN: 162-573-883 +``` +From a remote browser on the same network, check to make sure your webserver is working by going to `http://:5000`. You should be able to see "Hello World" on the webpage. +Screenshot 2025-09-28 at 00 58 12 + +### Storyboard + +Storyboard and/or use a Verplank diagram to design a speech-enabled device. (Stuck? Make a device that talks for dogs. If that is too stupid, find an application that is better than that.) + +\*\***Post your storyboard and diagram here.**\*\* +See documentation with storyboard/diagrams here: https://docs.google.com/document/d/13Gwjj5X3j9nWW3U7r54Km0AkHF1IowsGNWMSfG7pEe8/edit?tab=t.0 + +Write out what you imagine the dialogue to be. Use cards, post-its, or whatever method helps you develop alternatives or group responses. 
+
+\*\***Please describe and document your process.**\*\*
+See script here: https://docs.google.com/document/d/1t9Ip9DpQih5_yKYYBYtNqrnhIPVMiVl7mqFzCLVfRVg/edit?tab=t.0
+
+### Acting out the dialogue
+
+Find a partner, and *without sharing the script with your partner* try out the dialogue you've designed, where you (as the device designer) act as the device you are designing. Please record this interaction (for example, using Zoom's record feature).
+
+\*\***Describe whether the dialogue seemed different from what you imagined when it was acted out, and how.**\*\*
+
+### Wizarding with the Pi (optional)
+In the [demo directory](./demo), you will find an example Wizard of Oz project that you may use as a template. By running the `app.py` script, you can see how audio and sensor data (Adafruit MPU-6050 6-DoF Accel and Gyro Sensor) are streamed from the Pi to a wizard controller that runs in the browser at http://yourHostname.local:5000. You can control what the system says from the controller as well!
+
+\*\***Describe whether the wizarded dialogue seemed different from what you imagined or from the acted-out version, and how.**\*\*
+
+# Lab 3 Part 2
+
+For Part 2, you will redesign the interaction with the speech-enabled device using the data collected, as well as feedback from Part 1.
+
+## Prep for Part 2
+
+1. What are concrete things that could use improvement in the design of your device? For example: wording, timing, anticipation of misunderstandings...
+2. What are other modes of interaction _beyond speech_ that you might also use to clarify how to interact?
+3. Make a new storyboard, diagram and/or script based on these reflections.
+
+## Prototype your system
+
+The system should:
+* use the Raspberry Pi
+* use one or more sensors
+* require participants to speak to it.
+
+*Document how the system works*
+
+*Include videos or screencaptures of both the system and the controller.*
+
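As a starting point for Part 2, here is a minimal sketch (not the graded solution) of one way to meet these requirements: it combines the MSA311 accelerometer from the demo with the same speech-recognition, Ollama, and espeak pieces used in `final_voice_assistant.py`. The shake threshold, microphone index, model name, and file name are assumptions you would tune for your own setup.

```python
# sensor_voice_sketch.py -- hypothetical starting point, not a provided script.
# Waits for a shake on the MSA311 accelerometer, then listens, asks Ollama, and speaks.

import os
import time

import board
import busio
import requests
import speech_recognition as sr
from adafruit_msa3xx import MSA311

SHAKE_THRESHOLD = 12.0   # m/s^2; assumption, tune for your enclosure
MICROPHONE_INDEX = 2     # same webcam mic index used in final_voice_assistant.py
OLLAMA_URL = "http://localhost:11434/api/generate"
MODEL_NAME = "phi3:mini"

i2c = busio.I2C(board.SCL, board.SDA)
msa = MSA311(i2c)
recognizer = sr.Recognizer()


def speak(text):
    """Speak via espeak, escaping single quotes for the shell."""
    os.system("espeak '{}' 2>/dev/null".format(text.replace("'", "'\\''")))


def shaken():
    """Return True when any acceleration axis exceeds the threshold."""
    return any(abs(a) > SHAKE_THRESHOLD for a in msa.acceleration)


def listen():
    """Record one utterance from the webcam microphone and transcribe it."""
    with sr.Microphone(device_index=MICROPHONE_INDEX) as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source, timeout=8, phrase_time_limit=15)
    try:
        return recognizer.recognize_google(audio)
    except (sr.UnknownValueError, sr.RequestError):
        return None


def ask_ollama(prompt):
    """Send the prompt to the local Ollama server and return its reply."""
    response = requests.post(
        OLLAMA_URL,
        json={"model": MODEL_NAME, "prompt": prompt, "stream": False},
        timeout=120,
    )
    response.raise_for_status()
    return response.json().get("response", "")


if __name__ == "__main__":
    speak("Shake me when you want to talk.")
    while True:
        if shaken():
            speak("I'm listening.")
            question = listen()
            if question:
                speak(ask_ollama(question))
        time.sleep(0.1)
```

The loop blocks while listening, which is usually fine for a quick prototype; the shake trigger can be swapped for any other sensor you have on the Pi.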
+ Submission Cleanup Reminder (Click to Expand)
+
+ **Before submitting your README.md:**
+ - This README.md file has a lot of extra text for guidance.
+ - Remove all instructional text and example prompts from this file.
+ - You may either delete these sections or use the toggle/hide feature in VS Code to collapse them for a cleaner look.
+ - Your final submission should be neat, focused on your own work, and easy to read for grading.
+
+ This helps ensure your README.md is clear, professional, and uniquely yours!
+
+ +## Test the system +Try to get at least two people to interact with your system. (Ideally, you would inform them that there is a wizard _after_ the interaction, but we recognize that can be hard.) + +Answer the following: + +### What worked well about the system and what didn't? +\*\**your answer here*\*\* + +### What worked well about the controller and what didn't? + +\*\**your answer here*\*\* + +### What lessons can you take away from the WoZ interactions for designing a more autonomous version of the system? + +\*\**your answer here*\*\* + + +### How could you use your system to create a dataset of interaction? What other sensing modalities would make sense to capture? + +\*\**your answer here*\*\* + + + + + + + + + + + + diff --git a/Lab 3/demo/README.md b/Lab 3/demo/README.md new file mode 100644 index 0000000000..83c64f4e50 --- /dev/null +++ b/Lab 3/demo/README.md @@ -0,0 +1,84 @@ +# Magic Ball WoZ + +This is a Demo App for a Wizard of Oz interactive system where the wizard is playing a magic 8 ball + + + +## Hardware Set-Up + +For this demo, you will need: +* your Raspberry Pi, +* a Qwiic/Stemma Cable, +* the display (we are just using it for the Qwiic/StemmaQT port. Feel free to use the display in your projects), +* your accelerometer, and +* your web camera + + +

+
+
+Plug the display in and connect the accelerometer to the port underneath with your Qwiic connector cable. Plug the web camera into the Raspberry Pi.
+
+## Software Setup
+
+SSH on to your Raspberry Pi as we've done previously:
+
+`ssh pi@yourHostname.local`
+
+Ensure audio is playing through the aux connector by typing
+
+`sudo raspi-config`
+
+On `System Options`, hit enter. Go down to `S2 Audio` and hit enter. Select `1 USB Audio` and hit enter. Then navigate to `Finish` and exit the config menu.
+
+We will need one additional piece of software called VLC media player. To install it, type `sudo apt-get install vlc`
+
+
+I would suggest making a new virtual environment for this demo, then navigating to this folder and installing the requirements.
+
+```
+pi@yourHostname:~ $ virtualenv woz
+pi@yourHostname:~ $ source woz/bin/activate
+(woz) pi@yourHostname:~ $ cd Interactive-Lab-Hub/Lab\ 3/demo
+(woz) pi@yourHostname:~/Interactive-Lab-Hub/Lab 3/demo $
+(woz) pi@yourHostname:~/Interactive-Lab-Hub/Lab 3/demo $ pip install -r requirements.txt
+```
+
+## Running
+
+To run the app
+
+`(woz) pi@yourHostname:~/Interactive-Lab-Hub/Lab 3/demo $ python app.py`
+
+In the browser of a computer on the same network, navigate to http://yourHostname.local:5000/ where, in my case, the hostname is ixe00.
+
+![](./imgs/page.png)
+
+The interface will immediately begin streaming the accelerometer to let you know if your participant shakes their Magic 8 ball. The "eavesdrop" button will begin streaming audio from the Pi to your browser (note there is a noticeable delay; it is best to start eavesdropping right at the beginning). To have the Pi speak, you can write in the text box and hit send or press enter.
+
+## Notes
+
+You may need to change line 26 in `app.py`
+
+```
+hardware = 'plughw:2,0'
+```
+
+This is the soundcard and hardware device associated with the USB microphone. To check, you can run `python get_device.py`, which will output A LOT of nonsense. At the end, you will see
+
+```
+0 bcm2835 Headphones: - (hw:0,0)
+1 webcamproduct: USB Audio (hw:2,0)
+2 sysdefault
+3 lavrate
+4 samplerate
+5 speexrate
+6 pulse
+7 upmix
+8 vdownmix
+9 dmix
+10 default
+```
+
+In our case, `webcamproduct: USB Audio (hw:2,0)` is the name of our microphone and the index is in parentheses.
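A small, optional variation on `get_device.py` (my own sketch, not part of the provided demo; the file name is hypothetical) filters that output down to devices that can actually record, which makes the webcam microphone and its `hw:X,Y` numbers easier to spot:

```python
# list_input_devices.py -- hypothetical helper; prints only input-capable audio devices.
import pyaudio

audio = pyaudio.PyAudio()

for index in range(audio.get_device_count()):
    info = audio.get_device_info_by_index(index)
    if info.get("maxInputChannels", 0) > 0:
        # e.g. "1 webcamproduct: USB Audio (hw:2,0)  inputs: 1"
        print(index, info.get("name"), " inputs:", info.get("maxInputChannels"))

audio.terminate()
```

Whatever appears in the device name's parentheses (here `hw:2,0`) is what goes into the `hardware = 'plughw:2,0'` line in `app.py`.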
+ diff --git a/Lab 3/demo/app.py b/Lab 3/demo/app.py new file mode 100644 index 0000000000..cd4f75affe --- /dev/null +++ b/Lab 3/demo/app.py @@ -0,0 +1,63 @@ +import eventlet +eventlet.monkey_patch() + +from flask import Flask, Response,render_template +from flask_socketio import SocketIO, send, emit +from subprocess import Popen, call + +import time +import board +import busio +#import adafruit_mpu6050 +from adafruit_msa3xx import MSA311 +import json +import socket + +import signal +import sys +from queue import Queue + + +i2c = busio.I2C(board.SCL, board.SDA) +#mpu = adafruit_mpu6050.MPU6050(i2c) +msa = MSA311(i2c) + +hostname = socket.gethostname() +hardware = 'plughw:2,0' + +app = Flask(__name__) +socketio = SocketIO(app) +audio_stream = Popen("/usr/bin/cvlc alsa://"+hardware+" --sout='#transcode{vcodec=none,acodec=mp3,ab=256,channels=2,samplerate=44100,scodec=none}:http{mux=mp3,dst=:8080/}' --no-sout-all --sout-keep", shell=True) + +@socketio.on('speak') +def handel_speak(val): + call(f"espeak '{val}'", shell=True) + +@socketio.on('connect') +def test_connect(): + print('connected') + emit('after connect', {'data':'Lets dance'}) + +@socketio.on('ping-gps') +def handle_message(val): + # print(mpu.acceleration) + emit('pong-gps', msa.acceleration) + + + +@app.route('/') +def index(): + return render_template('index.html', hostname=hostname) + +def signal_handler(sig, frame): + print('Closing Gracefully') + audio_stream.terminate() + sys.exit(0) + +signal.signal(signal.SIGINT, signal_handler) + + +if __name__ == "__main__": + socketio.run(app, host='0.0.0.0', port=5000) + + diff --git a/Lab 3/demo/get_device.py b/Lab 3/demo/get_device.py new file mode 100644 index 0000000000..2a8dfcc3de --- /dev/null +++ b/Lab 3/demo/get_device.py @@ -0,0 +1,6 @@ +import pyaudio + +audio = pyaudio.PyAudio() + +for ii in range(audio.get_device_count()): + print(ii, audio.get_device_info_by_index(ii).get('name')) diff --git a/Lab 3/demo/imgs/page.png b/Lab 3/demo/imgs/page.png new file mode 100644 index 0000000000..c633381a68 Binary files /dev/null and b/Lab 3/demo/imgs/page.png differ diff --git a/Lab 3/demo/requirements.txt b/Lab 3/demo/requirements.txt new file mode 100644 index 0000000000..0efd65fb6c --- /dev/null +++ b/Lab 3/demo/requirements.txt @@ -0,0 +1,33 @@ +Adafruit-Blinka==6.4.0 +adafruit-circuitpython-busdevice==5.0.6 +adafruit-circuitpython-msa301==1.3.0 +adafruit-circuitpython-mpu6050==1.1.6 +adafruit-circuitpython-msa301==1.3.0 +adafruit-circuitpython-register==1.9.5 +Adafruit-PlatformDetect==3.3.0 +Adafruit-PureIO==1.1.8 +bidict==0.21.2 +click==7.1.2 +dnspython==1.16.0 +eventlet==0.31.0 +Flask==1.1.2 +Flask-SocketIO==5.0.1 +gevent==21.1.2 +gevent-websocket==0.10.1 +greenlet==1.0.0 +itsdangerous==1.1.0 +Jinja2==2.11.3 +MarkupSafe==1.1.1 +PyAudio==0.2.11 +pyftdi==0.52.9 +pyserial==3.5 +python-engineio==4.0.1 +python-socketio==5.1.0 +pyusb==1.1.1 +rpi-ws281x==4.2.6 +RPi.GPIO==0.7.1 +six==1.15.0 +sysv-ipc==1.1.0 +Werkzeug==1.0.1 +zope.event==4.5.0 +zope.interface==5.2.0 diff --git a/Lab 3/demo/static/index.js b/Lab 3/demo/static/index.js new file mode 100644 index 0000000000..8fedc35238 --- /dev/null +++ b/Lab 3/demo/static/index.js @@ -0,0 +1,196 @@ +// const control = document.getElementById('control'); +// const light = document.getElementById('light'); +// const play = document.getElementById('play'); +// const pause = document.getElementById('pause'); +// const audioIn = document.getElementById('audioIn'); +// const audio = new Audio(); +// let pickr; + +// const socket = io(); + +// 
socket.on('connect', () => { +// socket.on('hex', (val) => {document.body.style.backgroundColor = val}) +// socket.on('audio', (val) => {getSound(encodeURI(val));}) +// socket.on('pauseAudio', (val) => {audio.pause();}) +// socket.onAny((event, ...args) => { +// console.log(event, args); +// }); +// }); + +// // enter controller mode +// control.onclick = () => { +// console.log('control') +// // make sure you're not in fullscreen +// if (document.fullscreenElement) { +// document.exitFullscreen() +// .then(() => console.log('exited full screen mode')) +// .catch((err) => console.error(err)); +// } +// // make buttons and controls visible +// document.getElementById('user').classList.remove('fadeOut'); +// document.getElementById('controlPanel').style.opacity = 0.6; +// if (!pickr) { +// // create our color picker. You can change the swatches that appear at the bottom +// pickr = Pickr.create({ +// el: '.pickr', +// theme: 'classic', +// showAlways: true, +// swatches: [ +// 'rgba(255, 255, 255, 1)', +// 'rgba(244, 67, 54, 1)', +// 'rgba(233, 30, 99, 1)', +// 'rgba(156, 39, 176, 1)', +// 'rgba(103, 58, 183, 1)', +// 'rgba(63, 81, 181, 1)', +// 'rgba(33, 150, 243, 1)', +// 'rgba(3, 169, 244, 1)', +// 'rgba(0, 188, 212, 1)', +// 'rgba(0, 150, 136, 1)', +// 'rgba(76, 175, 80, 1)', +// 'rgba(139, 195, 74, 1)', +// 'rgba(205, 220, 57, 1)', +// 'rgba(255, 235, 59, 1)', +// 'rgba(255, 193, 7, 1)', +// 'rgba(0, 0, 0, 1)', +// ], +// components: { +// preview: false, +// opacity: false, +// hue: true, +// }, +// }); + +// pickr.on('change', (e) => { +// // when pickr color value is changed change background and send message on ws to change background +// const hexCode = e.toHEXA().toString(); +// document.body.style.backgroundColor = hexCode; +// socket.emit('hex', hexCode) +// }); +// } +// }; + +// light.onclick = () => { +// // safari requires playing on input before allowing audio +// audio.muted = true; +// audio.play().then(audio.muted=false) + +// // in light mode make it full screen and fade buttons +// document.documentElement.requestFullscreen(); +// document.getElementById('user').classList.add('fadeOut'); +// // if you were previously in control mode remove color picker and hide controls +// if (pickr) { +// // this is annoying because of the pickr package +// pickr.destroyAndRemove(); +// document.getElementById('controlPanel').append(Object.assign(document.createElement('div'), { className: 'pickr' })); +// pickr = undefined; +// } +// document.getElementById('controlPanel').style.opacity = 0; +// }; + + +// const getSound = (query, loop = false, random = false) => { +// const url = `https://freesound.org/apiv2/search/text/?query=${query}+"&fields=name,previews&token=U5slaNIqr6ofmMMG2rbwJ19mInmhvCJIryn2JX89&format=json`; +// fetch(url) +// .then((response) => response.clone().text()) +// .then((data) => { +// console.log(data); +// data = JSON.parse(data); +// if (data.results.length >= 1) var src = random ? 
choice(data.results).previews['preview-hq-mp3'] : data.results[0].previews['preview-hq-mp3']; +// audio.src = src; +// audio.play(); +// console.log(src); +// }) +// .catch((error) => console.log(error)); +// }; + +// play.onclick = () => { +// socket.emit('audio', audioIn.value) +// getSound(encodeURI(audioIn.value)); +// }; +// pause.onclick = () => { +// socket.emit('pauseAudio', audioIn.value) +// audio.pause(); +// }; +// audioIn.onkeyup = (e) => { if (e.keyCode === 13) { play.click(); } }; + +const socket = io(); +socket.on('connect', () => { +// socket.onAny((event, ...args) => { +// console.log(Date.now(),event, args); +// }); +}); + +const mic = document.getElementById('mic'); +const play = document.getElementById('play'); +const wordsIn = document.getElementById('wordsIn'); +const send = document.getElementById('send'); + +const src = mic.src +mic.src = '' + +play.onclick = () => { + if(mic.paused) { + console.log('redo audio') + mic.src = src + mic.play() + play.innerText='Pause' + } else { + mic.pause() + mic.src = ''; + play.innerText='Eavesdrop' + } + +} + +send.onclick = () => { + socket.emit('speak', wordsIn.value) + wordsIn.value = '' +} +wordsIn.onkeyup = (e) => { if (e.keyCode === 13) { send.click(); } }; + +setInterval(() => { + socket.emit('ping-gps', 'dat') +}, 100) + +socket.on('disconnect', () => { + console.log('disconnect') + mic.src = '' + + }); + +var vlSpec = { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + data: {name: 'table'}, + width: 400, + mark: 'line', + encoding: { + x: {field: 'x', type: 'quantitative', scale: {zero: false}}, + y: {field: 'y', type: 'quantitative'}, + color: {field: 'category', type: 'nominal'} + } +}; +vegaEmbed('#chart', vlSpec).then( (res) => { + let x, y, z; + let counter = -1; + let cat = ['x', 'y', 'z'] + let minimumX = -100; + socket.on('pong-gps', (new_x,new_y,new_z) => { + counter++; + minimumX++; + const newVals = [new_x, new_y, new_z].map((c,v) => { + return { + x: counter, + y: c, + category: cat[v] + }; + }) + const changeSet = vega + .changeset() + .insert(newVals) + .remove( (t) => { + return t.x < minimumX; + }); + res.view.change('table', changeSet).run(); + }) + +}) diff --git a/Lab 3/demo/static/style.css b/Lab 3/demo/static/style.css new file mode 100644 index 0000000000..7068cde4d1 --- /dev/null +++ b/Lab 3/demo/static/style.css @@ -0,0 +1 @@ +/**/ \ No newline at end of file diff --git a/Lab 3/demo/templates/index.html b/Lab 3/demo/templates/index.html new file mode 100644 index 0000000000..6b9af227a7 --- /dev/null +++ b/Lab 3/demo/templates/index.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + Document + + +

+ + + + diff --git a/Lab 3/lab1note.md b/Lab 3/lab1note.md new file mode 100644 index 0000000000..025d193b8e --- /dev/null +++ b/Lab 3/lab1note.md @@ -0,0 +1,15 @@ +# Lab 1 Grading Notes + +Here are some notes I took while grading Lab 1: + +1. At the start of your README file, please include your collaborator’s name along with their **netID**. Also, list both your registered name on Canvas and your preferred name. +2. Think of your submission as the documentation of what you’ve done. The golden standard is: if you show the README to someone who has no idea what your project is, they should be able to replicate it by following your instructions. +3. Format matters. Check the instructions [here](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax). +4. Peer evaluation and feedback matter. You should document their comments, and write down their words and netID. You’re also welcome to interview and present to people outside the class. +5. Don’t put raw videos directly in your Hub—it takes up too much storage. If you clone the Hub, the video will be downloaded to your Pi5, which doesn’t have much space for large files you don’t actually need. The same goes for large images. Instead, upload them to cloud storage such as Google Drive or [Cornell Box](https://it.cornell.edu/box). +6. Feel free to remove the comments and instructions we provided for the earlier part of the lab. You don’t need to leave them in your file unless you find them necessary. Again, think of the task as documenting your **own progress**. +7. Here are some good examples of Lab 1. Check how they organize their documentation and how the concept evolves into the actual working prototype: + - [Thomas Knoepffler, Rajvi Ranjit Patil, Om Kamath, Laura Moreno](https://github.com/thomknoe/INFO-5345/tree/Fall2025/Lab%201) + - [Wenzhuo Ma, Yoyo Wang](https://github.com/mawenzhuo2022/Interactive-Lab-Hub/tree/Fall2025/Lab%201) + - [Jully Li, Sirui Wang, Amy Chen](https://github.com/ac3295-lab/Interactive-Lab-Hub/tree/Fall2025/Lab%201) + - [Weicong Hong (wh528), Feier Su (fs495)](https://github.com/wendyyh/Interactive-Lab-Hub/blob/Fall2025/Lab%201/README.md) diff --git a/Lab 3/ollama/OLLAMA_SETUP.md b/Lab 3/ollama/OLLAMA_SETUP.md new file mode 100644 index 0000000000..cb5dd5bc01 --- /dev/null +++ b/Lab 3/ollama/OLLAMA_SETUP.md @@ -0,0 +1,187 @@ +# Ollama Setup Instructions for Lab 3 +*Interactive Device Design - Voice and Speech Prototypes* + +## What is Ollama? + +Ollama is a tool that lets you run large language models (like ChatGPT, but smaller) locally on your Raspberry Pi. This means your voice assistant can have intelligent conversations without needing internet connectivity for AI processing! + +## 🚀 Quick Setup + +### Step 1: Install Ollama + +Run this command in your terminal: + +```bash +curl -fsSL https://ollama.com/install.sh | sh +``` + +### Step 2: Download a Model + +We recommend **phi3:mini** for Raspberry Pi 5 - it's fast, lightweight, and smart enough for prototyping: + +```bash +ollama pull phi3:mini +``` + +*This will download about 2.2GB, so make sure you have good internet and some patience!* + +### Step 3: Test Your Installation + +```bash +ollama run phi3:mini "Hello, introduce yourself!" +``` + +You should see a response from the AI model. 
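Once the Python dependencies from Step 4 below are installed, you can also sanity-check the HTTP API that all of the lab scripts talk to. This is only a sketch, assuming the default Ollama port 11434 and the phi3:mini model pulled in Step 2 (the file name is a placeholder):

```python
# check_ollama_api.py -- hypothetical sanity check for the local Ollama HTTP API.
import requests

OLLAMA_URL = "http://localhost:11434"
MODEL_NAME = "phi3:mini"

# List the models the server currently knows about.
tags = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5).json()
print("Installed models:", [m["name"] for m in tags.get("models", [])])

# Request a single, non-streamed completion from the model.
reply = requests.post(
    f"{OLLAMA_URL}/api/generate",
    json={"model": MODEL_NAME, "prompt": "Say hello in one sentence.", "stream": False},
    timeout=60,
)
reply.raise_for_status()
print(reply.json().get("response", "No response"))
```

If the first call fails, the server probably isn't running (`ollama serve`); if the model list is empty, re-run `ollama pull phi3:mini`.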
+ +### Step 4: Install Python Dependencies + +```bash +pip install requests speechrecognition pyaudio flask flask-socketio +``` + +## 🎯 Ready-to-Use Scripts + +We've created several scripts for different use cases: + +### 1. Simple Demo (`ollama_demo.py`) +```bash +python3 ollama_demo.py +``` +- Basic text chat with Ollama +- Text-to-speech responses +- Perfect for understanding how Ollama works + +### 2. Voice Assistant (`ollama_voice_assistant.py`) +```bash +python3 ollama_voice_assistant.py +``` +- Full voice interaction (speech-to-text + Ollama + text-to-speech) +- Natural conversation flow +- Say "hello" to start, "goodbye" to exit + +### 3. Web Interface (`ollama_web_app.py`) +```bash +python3 ollama_web_app.py +``` +- Beautiful web interface at `http://localhost:5000` +- Chat interface with voice options +- Great for prototyping web-based voice interactions + +## 🔧 Troubleshooting + +### "Ollama not responding" +Make sure Ollama is running: +```bash +ollama serve +``` +Then try your script again. + +### "Model not found" +List available models: +```bash +ollama list +``` +If phi3:mini isn't there, pull it again: +```bash +ollama pull phi3:mini +``` + +### Speech Recognition Issues +Make sure your microphone is working: +```bash +python3 speech-scripts/test_microphone.py +``` + +### Audio Output Issues +Test with espeak: +```bash +espeak "Hello from your Pi" +``` + +## 📚 For Your Projects + +### Quick Integration Template + +```python +import requests + +def ask_ollama(question): + response = requests.post( + "http://localhost:11434/api/generate", + json={ + "model": "phi3:mini", + "prompt": question, + "stream": False + } + ) + return response.json().get('response', 'Sorry, no response') + +# Use it in your project +answer = ask_ollama("What's the weather like?") +print(answer) +``` + +### Make Your Assistant Specialized + +Add a system prompt to make your assistant behave differently: + +```python +def ask_specialized_ollama(question, personality): + response = requests.post( + "http://localhost:11434/api/generate", + json={ + "model": "phi3:mini", + "prompt": question, + "system": personality, # This changes behavior! + "stream": False + } + ) + return response.json().get('response', 'Sorry, no response') + +# Examples: +chef_response = ask_specialized_ollama( + "What should I cook?", + "You are a helpful chef. Give short, practical cooking advice." +) + +therapist_response = ask_specialized_ollama( + "I'm feeling stressed", + "You are a supportive counselor. Be empathetic and encouraging." +) +``` + +## 🎨 Creative Ideas for Your Project + +1. **Smart Home Assistant**: "Turn on the lights" → Ollama processes → controls GPIO +2. **Language Tutor**: Practice conversations in different languages +3. **Storytelling Device**: Interactive storytelling with AI-generated plots +4. **Cooking Assistant**: Voice-controlled recipe helper +5. **Study Buddy**: AI tutor that adapts to your learning style +6. **Emotion Support**: An empathetic companion for daily check-ins +7. **Game Master**: AI-powered text adventure games +8. **Creative Writing Partner**: Collaborative story creation + +## 📖 Additional Resources + +- [Ollama Documentation](https://docs.ollama.com) +- [Available Models](https://ollama.com/library) (try different ones!) +- [Ollama API Reference](https://docs.ollama.com/api) + +## 🆘 Getting Help + +1. Check the troubleshooting section above +2. Ask in the class Slack channel +3. Use WendyTA (mention "@Ollama" in your question) +4. Office hours with TAs + +## 🏆 Pro Tips + +1. 
**Model Size vs Speed**: Smaller models (like phi3:mini) are faster but less capable +2. **Internet Independence**: Once downloaded, models work offline! +3. **Experiment**: Try different system prompts to change personality +4. **Combine with Sensors**: Use Pi sensors + Ollama for context-aware responses +5. **Memory**: Each conversation is independent - add conversation history if needed + +--- + +*Happy prototyping! Remember: the goal is to rapidly iterate and test ideas with real users.* \ No newline at end of file diff --git a/Lab 3/ollama/final_voice_assistant.py b/Lab 3/ollama/final_voice_assistant.py new file mode 100644 index 0000000000..16b24406a3 --- /dev/null +++ b/Lab 3/ollama/final_voice_assistant.py @@ -0,0 +1,93 @@ +# ollama_voice_assistant.py + +import speech_recognition as sr +import requests +import os +import time + +# --- Configuration --- +MICROPHONE_INDEX = 2 +MODEL_NAME = "phi3:mini" +OLLAMA_URL = "http://localhost:11434/api/generate" + +ENERGY_THRESHOLD = 150 + +def speak(text): + """Uses the espeak command line tool for Text-to-Speech with better parameters.""" + + text = text.replace("'", "'\\''") + # -v en+f3: Female English voice | -s 150: Speed 150 WPM | -k 15: Pitch/Inflection + print(f"AI Speaking: {text}") + os.system(f"espeak -v en+f3 -s 150 -k 15 '{text}' 2>/dev/null") + +def transcribe_speech(): + """Listens for user input and converts it to text.""" + r = sr.Recognizer() + try: + with sr.Microphone(device_index=MICROPHONE_INDEX) as source: + r.adjust_for_ambient_noise(source) + r.energy_threshold = ENERGY_THRESHOLD + + print("\nListening... Speak now.") + speak("Ready. Ask me anything.") + + + time.sleep(0.5) + + audio = r.listen(source, timeout=8, phrase_time_limit=15) + + except Exception as e: + print(f"Microphone error: {e}. Check MICROPHONE_INDEX ({MICROPHONE_INDEX}).") + speak("I am having trouble accessing the microphone.") + return None + + try: + print("Transcribing via Google Speech Recognition...") + text = r.recognize_google(audio) + print(f"User Said: {text}") + return text + except sr.UnknownValueError: + print("Could not understand audio.") + speak("I didn't catch that. Could you repeat it?") + return None + except sr.RequestError as e: + print(f"Speech recognition service error: {e}") + speak("My transcription service is currently unavailable.") + return None + +def ask_ai(question): + """Sends the question to the local Ollama model.""" + print("Sending request to Ollama...") + try: + # Long timeout (120 seconds) for the RPi's slow processing + response = requests.post( + OLLAMA_URL, + json={"model": MODEL_NAME, "prompt": question, "stream": False}, + timeout=120 + ) + response.raise_for_status() + return response.json().get('response', 'No response received from the model.') + except requests.exceptions.RequestException as e: + print(f"Error communicating with Ollama: {e}") + return "I seem to be having trouble connecting to the AI model on port 11434." + +def main_assistant_loop(): + """Main loop for the voice assistant.""" + speak("Voice assistant is active. 
Say 'stop' to quit.") + while True: + user_text = transcribe_speech() + + if user_text: + if "stop" in user_text.lower() or "exit" in user_text.lower() or "quit" in user_text.lower(): + speak("Goodbye.") + print("Exiting assistant.") + break + + ai_response = ask_ai(user_text) + + if ai_response: + print(f"AI Response: {ai_response}") + speak(ai_response) + +if __name__ == "__main__": + main_assistant_loop() diff --git a/Lab 3/ollama/ollama_demo.py b/Lab 3/ollama/ollama_demo.py new file mode 100644 index 0000000000..11ff65fbc6 --- /dev/null +++ b/Lab 3/ollama/ollama_demo.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Simple Ollama Demo for Lab 3 +Basic example of integrating Ollama with voice I/O + +This script demonstrates: +1. Text input to Ollama +2. Voice input to Ollama +3. Voice output from Ollama +""" + +import requests +import json +import subprocess +import sys +import os + +# Set UTF-8 encoding for output +if sys.stdout.encoding != 'UTF-8': + import codecs + sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict') +if sys.stderr.encoding != 'UTF-8': + import codecs + sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict') + +def speak_text(text): + """Simple text-to-speech using espeak""" + # Clean text to avoid encoding issues + clean_text = text.encode('ascii', 'ignore').decode('ascii') + print(f"Assistant: {clean_text}") + subprocess.run(['espeak', f'"{clean_text}"'], shell=True, check=False) + +def query_ollama(prompt, model="phi3:mini"): + """Send a text prompt to Ollama and get response""" + try: + response = requests.post( + "http://localhost:11434/api/generate", + json={ + "model": model, + "prompt": prompt, + "stream": False + }, + timeout=30 + ) + + if response.status_code == 200: + return response.json().get('response', 'No response') + else: + return f"Error: {response.status_code}" + + except Exception as e: + return f"Error: {e}" + +def text_chat_demo(): + """Simple text-based chat with Ollama""" + print("\n=== TEXT CHAT DEMO ===") + print("Type 'quit' to exit") + + while True: + user_input = input("\nYou: ") + if user_input.lower() in ['quit', 'exit']: + break + + print("Thinking...") + response = query_ollama(user_input) + print(f"Ollama: {response}") + +def voice_response_demo(): + """Demo: Text input, voice output""" + print("\n=== VOICE RESPONSE DEMO ===") + print("Type your message, Ollama will respond with voice") + print("Type 'quit' to exit") + + while True: + user_input = input("\nYour message: ") + if user_input.lower() in ['quit', 'exit']: + break + + print("Thinking...") + response = query_ollama(user_input) + speak_text(response) + +def check_ollama(): + """Check if Ollama is running and model is available""" + try: + response = requests.get("http://localhost:11434/api/tags") + if response.status_code == 200: + models = response.json().get('models', []) + model_names = [m['name'] for m in models] + print(f"Ollama is running. Available models: {model_names}") + return True + else: + print("Ollama is not responding") + return False + except Exception as e: + print(f"Cannot connect to Ollama: {e}") + print("Make sure Ollama is running with: ollama serve") + return False + +def main(): + """Main demo menu""" + print("Ollama Lab 3 Demo") + print("=" * 30) + + # Check Ollama connection + if not check_ollama(): + return + + while True: + print("\nChoose a demo:") + print("1. Text Chat (type to Ollama)") + print("2. Voice Response (Ollama speaks responses)") + print("3. 
Test Ollama (simple query)") + print("4. Exit") + + choice = input("\nEnter choice (1-4): ") + + if choice == "1": + text_chat_demo() + elif choice == "2": + voice_response_demo() + elif choice == "3": + response = query_ollama("Say hello and introduce yourself briefly") + print(f"Ollama: {response}") + elif choice == "4": + print("Goodbye!") + break + else: + print("Invalid choice. Please try again.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/Lab 3/ollama/ollama_requirements.txt b/Lab 3/ollama/ollama_requirements.txt new file mode 100644 index 0000000000..39a19ff33a --- /dev/null +++ b/Lab 3/ollama/ollama_requirements.txt @@ -0,0 +1,24 @@ +# Requirements for Ollama Integration in Lab 3 + +# Core dependencies for Ollama integration +requests>=2.31.0 +flask>=2.3.0 +flask-socketio>=5.3.0 + +# Voice processing dependencies +SpeechRecognition>=3.10.0 +pyaudio>=0.2.11 +pyttsx3>=2.90 + +# Alternative TTS (system-level, installed via apt) +# espeak (install with: sudo apt-get install espeak) + +# Optional: Enhanced audio processing +# sounddevice>=0.4.6 +# numpy>=1.24.0 + +# For web interface +eventlet>=0.33.3 + +# Note: Ollama itself is installed separately using the install script: +# curl -fsSL https://ollama.com/install.sh | sh \ No newline at end of file diff --git a/Lab 3/ollama/ollama_voice_assistant.py b/Lab 3/ollama/ollama_voice_assistant.py new file mode 100644 index 0000000000..3e5dfc6afd --- /dev/null +++ b/Lab 3/ollama/ollama_voice_assistant.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Ollama Voice Assistant for Lab 3 +Interactive voice assistant using speech recognition, Ollama AI, and text-to-speech + +Dependencies: +- ollama (API client) +- speech_recognition +- pyaudio +- pyttsx3 or espeak +""" + +import speech_recognition as sr +import subprocess +import requests +import json +import time +import sys +import threading +from queue import Queue + +# Set UTF-8 encoding for output +if sys.stdout.encoding != 'UTF-8': + import codecs + sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict') +if sys.stderr.encoding != 'UTF-8': + import codecs + sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict') + +try: + import pyttsx3 + TTS_ENGINE = 'pyttsx3' +except ImportError: + TTS_ENGINE = 'espeak' + print("pyttsx3 not available, using espeak for TTS") + +class OllamaVoiceAssistant: + def __init__(self, model_name="phi3:mini", ollama_url="http://localhost:11434"): + self.model_name = model_name + self.ollama_url = ollama_url + self.recognizer = sr.Recognizer() + self.microphone = sr.Microphone() + + # Initialize TTS + if TTS_ENGINE == 'pyttsx3': + self.tts_engine = pyttsx3.init() + self.tts_engine.setProperty('rate', 150) # Speed of speech + + # Test Ollama connection + self.test_ollama_connection() + + # Adjust for ambient noise + print("Adjusting for ambient noise... Please wait.") + with self.microphone as source: + self.recognizer.adjust_for_ambient_noise(source) + print("Ready for conversation!") + + def test_ollama_connection(self): + """Test if Ollama is running and the model is available""" + try: + response = requests.get(f"{self.ollama_url}/api/tags") + if response.status_code == 200: + models = response.json().get('models', []) + model_names = [m['name'] for m in models] + if self.model_name in model_names: + print(f"Ollama is running with {self.model_name} model") + else: + print(f"Model {self.model_name} not found. 
Available models: {model_names}") + if model_names: + self.model_name = model_names[0] + print(f"Using {self.model_name} instead") + else: + raise Exception("Ollama API not responding") + except Exception as e: + print(f"Error connecting to Ollama: {e}") + print("Make sure Ollama is running: 'ollama serve'") + sys.exit(1) + + def speak(self, text): + """Convert text to speech""" + # Clean text to avoid encoding issues + clean_text = text.encode('ascii', 'ignore').decode('ascii') + print(f"Assistant: {clean_text}") + + if TTS_ENGINE == 'pyttsx3': + self.tts_engine.say(clean_text) + self.tts_engine.runAndWait() + else: + # Use espeak as fallback + subprocess.run(['espeak', clean_text], check=False) + + def listen(self): + """Listen for speech and convert to text""" + try: + print("Listening...") + with self.microphone as source: + # Listen for audio with timeout + audio = self.recognizer.listen(source, timeout=5, phrase_time_limit=10) + + print("Recognizing...") + # Use Google Speech Recognition (free) + text = self.recognizer.recognize_google(audio) + print(f"You said: {text}") + return text.lower() + + except sr.WaitTimeoutError: + print("No speech detected, timing out...") + return None + except sr.UnknownValueError: + print("Could not understand audio") + return None + except sr.RequestError as e: + print(f"Error with speech recognition service: {e}") + return None + + def query_ollama(self, prompt, system_prompt=None): + """Send a query to Ollama and get response""" + try: + data = { + "model": self.model_name, + "prompt": prompt, + "stream": False + } + + if system_prompt: + data["system"] = system_prompt + + response = requests.post( + f"{self.ollama_url}/api/generate", + json=data, + timeout=30 + ) + + if response.status_code == 200: + result = response.json() + return result.get('response', 'Sorry, I could not generate a response.') + else: + return f"Error: Ollama API returned status {response.status_code}" + + except requests.exceptions.Timeout: + return "Sorry, the response took too long. Please try again." + except Exception as e: + return f"Error communicating with Ollama: {e}" + + def run_conversation(self): + """Main conversation loop""" + print("\nOllama Voice Assistant Started!") + print("Say 'hello' to start, 'exit' or 'quit' to stop") + print("=" * 50) + + # System prompt to make the assistant more conversational + system_prompt = """You are a helpful voice assistant. Keep your responses concise and conversational, + typically 1-2 sentences. Be friendly and engaging. You are running on a Raspberry Pi as part of an + interactive device design lab.""" + + self.speak("Hello! I'm your Ollama voice assistant. How can I help you today?") + + while True: + try: + # Listen for user input + user_input = self.listen() + + if user_input is None: + continue + + # Check for exit commands + if any(word in user_input for word in ['exit', 'quit', 'bye', 'goodbye']): + self.speak("Goodbye! Have a great day!") + break + + # Check for greeting + if any(word in user_input for word in ['hello', 'hi', 'hey']): + self.speak("Hello! What would you like to talk about?") + continue + + # Send to Ollama for processing + print("Thinking...") + response = self.query_ollama(user_input, system_prompt) + + # Speak the response + self.speak(response) + + except KeyboardInterrupt: + print("\nConversation interrupted by user") + self.speak("Goodbye!") + break + except Exception as e: + print(f"Unexpected error: {e}") + self.speak("Sorry, I encountered an error. 
Let's try again.") + +def main(): + """Main function to run the voice assistant""" + print("Starting Ollama Voice Assistant...") + + # Check if required dependencies are available + try: + import speech_recognition + import requests + except ImportError as e: + print(f"Missing dependency: {e}") + print("Please install with: pip install speechrecognition requests pyaudio") + return + + # Create and run the assistant + try: + assistant = OllamaVoiceAssistant() + assistant.run_conversation() + except Exception as e: + print(f"Failed to start assistant: {e}") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/Lab 3/ollama/ollama_web_app.py b/Lab 3/ollama/ollama_web_app.py new file mode 100644 index 0000000000..a5b896f6c5 --- /dev/null +++ b/Lab 3/ollama/ollama_web_app.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +""" +Ollama Flask Web Interface for Lab 3 +Web-based voice assistant using Ollama + +This extends the existing Flask app in demo/app.py to include Ollama integration +""" + +import eventlet +eventlet.monkey_patch() + +from flask import Flask, Response, render_template, request, jsonify +from flask_socketio import SocketIO, send, emit +import requests +import json +import subprocess +import os + +app = Flask(__name__) +socketio = SocketIO(app, cors_allowed_origins="*") + +# Ollama configuration +OLLAMA_URL = "http://localhost:11434" +DEFAULT_MODEL = "phi3:mini" + +def query_ollama(prompt, model=DEFAULT_MODEL): + """Query Ollama and return response""" + try: + response = requests.post( + f"{OLLAMA_URL}/api/generate", + json={ + "model": model, + "prompt": prompt, + "stream": False + }, + timeout=30 + ) + + if response.status_code == 200: + return response.json().get('response', 'No response generated') + else: + return f"Error: Ollama returned status {response.status_code}" + + except requests.exceptions.Timeout: + return "Sorry, the response took too long. Please try again." 
+ except Exception as e: + return f"Error: {str(e)}" + +def speak_text(text): + """Text-to-speech using espeak""" + try: + subprocess.run(['espeak', f'"{text}"'], shell=True, check=False) + except Exception as e: + print(f"TTS Error: {e}") + +@app.route('/') +def index(): + """Main web interface""" + return render_template('ollama_chat.html') + +@app.route('/api/chat', methods=['POST']) +def chat_api(): + """REST API endpoint for chat""" + data = request.get_json() + user_message = data.get('message', '') + + if not user_message: + return jsonify({'error': 'No message provided'}), 400 + + # Query Ollama + response = query_ollama(user_message) + + return jsonify({ + 'user_message': user_message, + 'ai_response': response + }) + +@socketio.on('chat_message') +def handle_chat_message(data): + """Handle chat message via WebSocket""" + user_message = data.get('message', '') + + if user_message: + # Query Ollama + ai_response = query_ollama(user_message) + + # Send response back to client + emit('ai_response', { + 'user_message': user_message, + 'ai_response': ai_response + }) + +@socketio.on('speak_request') +def handle_speak_request(data): + """Handle text-to-speech request""" + text = data.get('text', '') + if text: + speak_text(text) + emit('speak_complete', {'text': text}) + +@socketio.on('voice_chat') +def handle_voice_chat(data): + """Handle voice chat request (text in, voice out)""" + user_message = data.get('message', '') + + if user_message: + # Query Ollama + ai_response = query_ollama(user_message) + + # Speak the response + speak_text(ai_response) + + # Send response to client + emit('voice_response', { + 'user_message': user_message, + 'ai_response': ai_response + }) + +@app.route('/status') +def status(): + """Check Ollama status""" + try: + response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5) + if response.status_code == 200: + models = response.json().get('models', []) + return jsonify({ + 'status': 'connected', + 'models': [m['name'] for m in models], + 'current_model': DEFAULT_MODEL + }) + else: + return jsonify({'status': 'error', 'message': 'Ollama not responding'}), 500 + except Exception as e: + return jsonify({'status': 'error', 'message': str(e)}), 500 + +if __name__ == '__main__': + print("🚀 Starting Ollama Flask Web Interface...") + print("Open your browser to http://localhost:5000") + socketio.run(app, host='0.0.0.0', port=5000, debug=True) \ No newline at end of file diff --git a/Lab 3/ollama/templates/ollama_chat.html b/Lab 3/ollama/templates/ollama_chat.html new file mode 100644 index 0000000000..c7e29d38e1 --- /dev/null +++ b/Lab 3/ollama/templates/ollama_chat.html @@ -0,0 +1,284 @@ + + + + + + Ollama Voice Assistant - Lab 3 + + + + +
+ (ollama_chat.html UI text: header "🤖 Ollama Voice Assistant"; status banner "Connecting to Ollama..."; greeting "👋 Hello! I'm your Ollama-powered voice assistant. Type a message or use the voice features below!")
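If you want to exercise `ollama_web_app.py` without the browser UI, you can hit its REST endpoints directly. A minimal sketch, assuming the app is running locally on port 5000 (the prompt text and file name here are just placeholders):

```python
# chat_api_test.py -- hypothetical client for the /status and /api/chat endpoints of ollama_web_app.py.
import requests

BASE_URL = "http://localhost:5000"

# /status reports whether the Flask app can reach Ollama and which models it sees.
print(requests.get(f"{BASE_URL}/status", timeout=10).json())

# /api/chat takes {"message": ...} and returns the user message plus the AI response.
resp = requests.post(
    f"{BASE_URL}/api/chat",
    json={"message": "What should I prototype this week?"},
    timeout=60,
)
resp.raise_for_status()
data = resp.json()
print("You:", data["user_message"])
print("AI:", data["ai_response"])
```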
+ + + + \ No newline at end of file diff --git a/Lab 3/prep.md b/Lab 3/prep.md new file mode 100644 index 0000000000..124da15d94 --- /dev/null +++ b/Lab 3/prep.md @@ -0,0 +1,95 @@ + +# Prep your Pi + + +### To prepare lab 3, you will need: + +- Raspberry Pi 5 +- Active Cooler for Pi 5 +- Stacking Header 40 pin +- Bluetooth Speaker +- Logitech Webcam w/camera + + +### Install Active Cooler + +- Disconnect the Mini screen on pi just for now +- Unpack the preassembled Active Cooler from its box. +- Remove the backing paper from the thermal pads on the underside of the product. +- Make sure your Raspberry Pi 5 is powered off. Position the Active Cooler carefully in the correct space on Raspberry Pi 5, making sure not to hit any of the connectors. Please refer to the diagram on the front of the box which shows the correct position and orientation of the product. +- Align the two white push pins with the two dedicated heatsink holes. +- When correctly positioned, press evenly on the tops of the two push pins simultaneously until they click, indicating that they are clipped onto the board. +- Once the Active Cooler is mounted, connect its fan cable to the connector labelled‘FAN’ on Raspberry Pi 5. Take care to ensure the cable’s connector is the correct way round when inserting it. If you feel any resistance, stop immediately, remove the fan cable connector, and make sure that both it and the connector on Raspberry Pi 5 are undamaged before proceeding. Make sure that the connector on the cable is pushed down fully onto the connector on Raspberry Pi 5. +- We recommend that the Active Cooler is not removed once it is fitted to Raspberry Pi 5. Removal of the Active Cooler will cause the push pins and thermal pads to degrade and is likely to lead to product damage. +- Ensure the push pins are undamaged and can clip on to the Raspberry Pi board securely before use. Discontinue use of the Active Cooler and replace the push pins if they are damaged or deformed, or if they do not clip securely +- Connect the stacking header 40 pin to the 40-pin GPIO of RaspberryPi5 +- Reinstall the mini pi screen + + +See the [User Manual](https://datasheets.raspberrypi.com/cooling/raspberry-pi-active-cooler-product-brief.pdf) +See the [Video Walkthrough](https://www.youtube.com/shorts/e1CtdqeT3o0) + + +### Set up and connect bluetooth speaker + +1. Charge the Bluetooth speaker with the paired USB type C cable. +2. Disconnect the speaker from charging. Long press the power icon on the speaker body, until the small white LED flashes. + +#### Option 1: GUI Method (VNC) +3. Open VNC viewer and connect your Pi5. On the top right corner, click the Bluetooth icon, and on the dropdown menu, select "Make Discoverable". Meanwhile, select "Add Device". Once you find the 'X1', pair and connect with it. You should hear a "beep" if the connection is successful +choose os + +#### Option 2: Command Line Method +Alternatively, you can pair the X1 speaker from the terminal: + +1. **Start Bluetooth control:** + ```bash + sudo bluetoothctl + ``` + +2. **Enable Bluetooth and make discoverable:** + ```bash + power on + agent on + discoverable on + scan on + ``` + +3. **Find your X1 speaker (filter devices by name):** + ```bash + devices + ``` + Then look for a line containing "X1", or use this command to filter: + ```bash + exit + echo 'devices' | bluetoothctl | grep -i "X1" + ``` + This will show something like: `Device XX:XX:XX:XX:XX:XX X1` + +4. 
+
+
+### Set up the Web camera
+
+1. While connected to the Pi over VNC (or Raspberry Pi Connect), open a terminal and run:
+
+   ```
+   $ sudo apt install pavucontrol
+   ```
+2. Open pavucontrol from the terminal; a GUI window should appear.
+
+   ```
+   $ pavucontrol
+   ```
+3. Go to the Configuration tab and make sure the profile of the C270 webcam is set to Mono Input.
+4. Go to the Input Devices tab; you should see the level bar move as you speak, which means the microphone is set up correctly. For a quick check without the GUI, see the sketch below.
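+
+You can also sanity-check the microphone from the terminal. This is only a sketch: the capture device `hw:2,0` matches the speech scripts later in this lab, but the card/device numbers can differ on your Pi, so check `arecord -l` first and adjust; the filename `mic_check.wav` is arbitrary.
+
+```bash
+# List the available capture devices to find the webcam's card/device numbers.
+arecord -l
+
+# Record 5 seconds of 16 kHz mono audio from the webcam microphone,
+# then play it back through the current default audio output.
+arecord -D hw:2,0 -f S16_LE -r 16000 -d 5 -t wav mic_check.wav
+aplay mic_check.wav
+```
+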
diff --git a/Lab 3/requirements.txt b/Lab 3/requirements.txt
new file mode 100644
index 0000000000..83145d85ed
--- /dev/null
+++ b/Lab 3/requirements.txt
@@ -0,0 +1,102 @@
+addict==2.4.0
+annotated-types==0.7.0
+attrs==25.3.0
+av==15.1.0
+babel==2.17.0
+blis==1.3.0
+catalogue==2.0.10
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.3.0
+cloudpathlib==0.22.0
+coloredlogs==15.0.1
+confection==0.1.5
+csvw==3.6.0
+ctranslate2==4.6.0
+curated-tokenizers==0.0.9
+curated-transformers==0.1.1
+cymem==2.0.11
+dlinfo==2.0.0
+docopt==0.6.2
+espeakng-loader==0.2.4
+faster-whisper==1.2.0
+filelock==3.19.1
+flatbuffers==20181003210633
+fsspec==2025.9.0
+hf-xet==1.1.10
+huggingface_hub==0.35.0
+humanfriendly==10.0
+idna==3.10
+isodate==0.7.2
+Jinja2==3.1.6
+joblib==1.5.2
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+kittentts @ https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl
+langcodes==3.5.0
+language-tags==1.2.0
+language_data==1.3.0
+llvmlite==0.45.0
+marisa-trie==1.3.1
+markdown-it-py==4.0.0
+MarkupSafe==3.0.2
+mdurl==0.1.2
+misaki==0.9.4
+more-itertools==10.8.0
+mpmath==1.3.0
+murmurhash==1.0.13
+networkx==3.5
+num2words==0.5.14
+numba==0.62.0
+numpy==2.3.3
+onnxruntime==1.22.1
+openai-whisper==20250625
+packaging==25.0
+phonemizer-fork==3.3.2
+piper-tts==1.3.0
+preshed==3.0.10
+protobuf==6.32.1
+pycparser==2.23
+pydantic==2.11.9
+pydantic_core==2.33.2
+Pygments==2.19.2
+pyparsing==3.2.4
+python-dateutil==2.9.0.post0
+PyYAML==6.0.2
+rdflib==7.1.4
+referencing==0.36.2
+regex==2025.9.1
+requests==2.32.5
+rfc3986==1.5.0
+rich==14.1.0
+rpds-py==0.27.1
+segments==2.3.0
+shellingham==1.5.4
+six==1.17.0
+smart_open==7.3.1
+sounddevice==0.5.2
+soundfile==0.13.1
+spacy==3.8.7
+spacy-curated-transformers==0.3.1
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.5.1
+srt==3.5.3
+sympy==1.14.0
+termcolor==3.1.0
+thinc==8.3.6
+tiktoken==0.11.0
+tokenizers==0.22.0
+torch==2.8.0
+tqdm==4.67.1
+typer==0.17.4
+typing-inspection==0.4.1
+typing_extensions==4.15.0
+uritemplate==4.2.0
+urllib3==2.5.0
+vosk==0.3.45
+wasabi==1.1.3
+weasel==0.4.1
+websockets==15.0.1
+wrapt==1.17.3
diff --git a/Lab 3/server.py b/Lab 3/server.py
new file mode 100644
index 0000000000..ae33946c36
--- /dev/null
+++ b/Lab 3/server.py
@@ -0,0 +1,12 @@
+from flask import Flask
+
+app = Flask(__name__)
+
+@app.route('/')
+def index():
+    return 'Hello world'
+
+if __name__ == '__main__':
+    app.run(debug=True, host='0.0.0.0')
+
+
diff --git a/Lab 3/setup.sh b/Lab 3/setup.sh
new file mode 100644
index 0000000000..606ca0baab
--- /dev/null
+++ b/Lab 3/setup.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Function to print a message and install a package
+install_package() {
+    local name="$1"   # Keep the package name; "$1" changes after the shift below
+    echo "Installing $name..."
+    shift             # Shift so that "$@" holds the install command itself
+    echo "Y" | "$@"   # Run the command, piping 'Y' for approval
+    echo "$name installed!"
+}
+
+# Install pip package
+# echo "Installing piper-tts via pip for local user..."
+# pip install piper-tts --user
+# echo "piper-tts installed!"
+
+# Install packages using apt-get
+install_package "festival" sudo apt-get install festival
+install_package "espeak" sudo apt-get install espeak
+install_package "mplayer" sudo apt-get install mplayer
+install_package "mpg123" sudo apt-get install mpg123
+install_package "libttspico-utils" sudo apt-get install libttspico-utils
+
+# Change all scripts in the subfolder 'speech-scripts' to be executable
+echo "Making all scripts in the 'speech-scripts' subfolder executable..."
+chmod u+x ./speech-scripts/*
+echo "Scripts are now executable!"
+
+echo "All tasks completed!"
diff --git a/Lab 3/speech-scripts/GoogleTTS_demo.sh b/Lab 3/speech-scripts/GoogleTTS_demo.sh
new file mode 100755
index 0000000000..b6728c73b6
--- /dev/null
+++ b/Lab 3/speech-scripts/GoogleTTS_demo.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+#https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)
+
+say() { local IFS=+;/usr/bin/mplayer -ao alsa -really-quiet -noconsolecontrols "http://translate.google.com/translate_tts?ie=UTF-8&client=tw-ob&q=$*&tl=en"; }
+#say $*
+say " This mission is too important for me to allow you to jeopardize it."
+
diff --git a/Lab 3/speech-scripts/check_words_example/recorded_mono.wav b/Lab 3/speech-scripts/check_words_example/recorded_mono.wav
new file mode 100644
index 0000000000..e3548201f3
Binary files /dev/null and b/Lab 3/speech-scripts/check_words_example/recorded_mono.wav differ
diff --git a/Lab 3/speech-scripts/check_words_example/test_words.py b/Lab 3/speech-scripts/check_words_example/test_words.py
new file mode 100644
index 0000000000..01922fe17f
--- /dev/null
+++ b/Lab 3/speech-scripts/check_words_example/test_words.py
@@ -0,0 +1,63 @@
+import os
+import wave
+import json
+import glob
+from vosk import Model, KaldiRecognizer
+
+final_text_buffer = []
+
+# Define cache model directory and check if the model is in cache
+cache_model_path = os.path.expanduser("~/.cache/vosk/vosk-model-small-en-us-0.15")
+if not os.path.exists(cache_model_path):
+    print("Please run the microphone_test.py first to download the model.")
+    exit(1)
+
+# Find the most recently created WAV file in the current directory
+wav_files = glob.glob('*.wav')
+if not wav_files:
+    print("No WAV files found in the current directory.")
+    exit(1)
+
+# Get the last created WAV file
+latest_wav_file = max(wav_files, key=os.path.getctime)
+
+# Load the latest WAV file
+wf = wave.open(latest_wav_file, "rb")
+if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
+    print("Audio file must be WAV format mono PCM.")
+    exit(1)
+
+# Set up recognizer with the model from the cache
+model = Model(cache_model_path)
+rec = KaldiRecognizer(model, wf.getframerate())
+
+# Process the audio file
+while True:
+    data = wf.readframes(4000)
+    if len(data) == 0:
+        break
+    if rec.AcceptWaveform(data):
+        print(rec.Result())
+    else:
+        print(rec.PartialResult())
+
+last = json.loads(rec.FinalResult()).get("text", "").strip()
+if last:
+    print(f"Final: {last}")
+    final_text_buffer.append(last)
+
+final_text = ""
+if final_text_buffer:
+    print("Transcript (joined):")
+    final_text = " ".join(final_text_buffer)
+
+print("Final Recognized Text: ", final_text)
+
+# Check if any of the predefined words are in the recognized text
+words_list = ["oh", "one",
"two", "three", "four", "five", "six", "seven", "eight", "nine", "zero"] + +for word in words_list: + if word in final_text.split(): + print(f"The word '{word}' is in the recognized text.") + else: + print(f"The word '{word}' is not in the recognized text.") diff --git a/Lab 3/speech-scripts/check_words_example/test_words_old.py b/Lab 3/speech-scripts/check_words_example/test_words_old.py new file mode 100644 index 0000000000..e755c27c7c --- /dev/null +++ b/Lab 3/speech-scripts/check_words_example/test_words_old.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 + +from vosk import Model, KaldiRecognizer +import sys +import os +import wave + +if not os.path.exists("model"): + print ("Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder.") + exit (1) + +wf = wave.open(sys.argv[1], "rb") +if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE": + print ("Audio file must be WAV format mono PCM.") + exit (1) + +model = Model("model") +# You can also specify the possible word list +rec = KaldiRecognizer(model, wf.getframerate(), '["oh one two three four five six seven eight nine zero", "[unk]"]') + +while True: + data = wf.readframes(4000) + if len(data) == 0: + break + if rec.AcceptWaveform(data): + print(rec.Result()) + else: + print(rec.PartialResult()) + +print(rec.FinalResult()) diff --git a/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh b/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh new file mode 100644 index 0000000000..f846a87a1f --- /dev/null +++ b/Lab 3/speech-scripts/check_words_example/vosk_demo_mic.sh @@ -0,0 +1,4 @@ +#arecord -f cd -r 16000 -d 5 -t wav recorded.wav && sox recorded.wav recorded_mono.wav remix 1,2 + +arecord -D hw:2,0 -f cd -c1 -r 48000 -d 5 -t wav recorded_mono.wav +python3 test_words.py recorded_mono.wav diff --git a/Lab 3/speech-scripts/espeak_demo.sh b/Lab 3/speech-scripts/espeak_demo.sh new file mode 100755 index 0000000000..5111e7cdc6 --- /dev/null +++ b/Lab 3/speech-scripts/espeak_demo.sh @@ -0,0 +1,4 @@ + +# from https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis) +espeak -ven+f2 -k5 -s150 --stdout "I can make the Pi say anything at all" | aplay + diff --git a/Lab 3/speech-scripts/faster_whisper_try.py b/Lab 3/speech-scripts/faster_whisper_try.py new file mode 100755 index 0000000000..f40b470d44 --- /dev/null +++ b/Lab 3/speech-scripts/faster_whisper_try.py @@ -0,0 +1,25 @@ +from faster_whisper import WhisperModel + +import time + +start_time = time.perf_counter() + +model_size = "tiny" + +# Run on GPU with FP16 +# model = WhisperModel(model_size, device="cuda", compute_type="float16") + +# or run on GPU with INT8 +# model = WhisperModel(model_size, device="cuda", compute_type="int8_float16") +# or run on CPU with INT8 +model = WhisperModel(model_size, device="cpu", compute_type="int8") + +segments, info = model.transcribe("lookdave.wav", beam_size=5) + +for segment in segments: + print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) + +end_time = time.perf_counter() + +elapsed_time = end_time - start_time +print(f"Program executed in {elapsed_time:.6f} seconds") diff --git a/Lab 3/speech-scripts/festival_demo.sh b/Lab 3/speech-scripts/festival_demo.sh new file mode 100755 index 0000000000..75d6ab88fa --- /dev/null +++ b/Lab 3/speech-scripts/festival_demo.sh @@ -0,0 +1,4 @@ +#from: https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis)#Festival_Text_to_Speech + +echo "Just what do you think you're doing, 
Dave?" | festival --tts + diff --git a/Lab 3/speech-scripts/lookdave.wav b/Lab 3/speech-scripts/lookdave.wav new file mode 100755 index 0000000000..dcacf676e8 Binary files /dev/null and b/Lab 3/speech-scripts/lookdave.wav differ diff --git a/Lab 3/speech-scripts/my_greeting.sh b/Lab 3/speech-scripts/my_greeting.sh new file mode 100755 index 0000000000..ff78e6a286 --- /dev/null +++ b/Lab 3/speech-scripts/my_greeting.sh @@ -0,0 +1 @@ +echo "Greetings, Jesse Iriah. Welcome to Lab Three." | espeak diff --git a/Lab 3/speech-scripts/numerical_input.sh b/Lab 3/speech-scripts/numerical_input.sh new file mode 100755 index 0000000000..efc4d4d3b7 --- /dev/null +++ b/Lab 3/speech-scripts/numerical_input.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Use TTS to ask the question +echo "Please state your 5-digit zip code now." | espeak + +# Record the response +arecord -D hw:2,0 -f S16_LE -r 16000 -d 5 numerical_answer.wav + +echo "Recording complete. The audio is saved as numerical_answer.wav." + + diff --git a/Lab 3/speech-scripts/pico2text_demo.sh b/Lab 3/speech-scripts/pico2text_demo.sh new file mode 100755 index 0000000000..f80d7d59cb --- /dev/null +++ b/Lab 3/speech-scripts/pico2text_demo.sh @@ -0,0 +1,4 @@ +# from https://elinux.org/RPi_Text_to_Speech_(Speech_Synthesis) + +pico2wave -w lookdave.wav "Look Dave, I can see you're really upset about this." && aplay lookdave.wav + diff --git a/Lab 3/speech-scripts/recorded_mono.wav b/Lab 3/speech-scripts/recorded_mono.wav new file mode 100755 index 0000000000..f4ad47cea0 Binary files /dev/null and b/Lab 3/speech-scripts/recorded_mono.wav differ diff --git a/Lab 3/speech-scripts/test_microphone.py b/Lab 3/speech-scripts/test_microphone.py new file mode 100755 index 0000000000..b37dfcd369 --- /dev/null +++ b/Lab 3/speech-scripts/test_microphone.py @@ -0,0 +1,90 @@ +#!/usr/bin/env -S /home/pi/Interactive-Lab-Hub/Lab\ 3/.venv/bin/python + + +# prerequisites: as described in https://alphacephei.com/vosk/install and also python module `sounddevice` (simply run command `pip install sounddevice`) +# Example usage using Dutch (nl) recognition model: `python test_microphone.py -m nl` +# For more help run: `python test_microphone.py -h` + +import argparse +import queue +import sys +import sounddevice as sd + +from vosk import Model, KaldiRecognizer + +q = queue.Queue() + +def int_or_str(text): + """Helper function for argument parsing.""" + try: + return int(text) + except ValueError: + return text + +def callback(indata, frames, time, status): + """This is called (from a separate thread) for each audio block.""" + if status: + print(status, file=sys.stderr) + q.put(bytes(indata)) + +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument( + "-l", "--list-devices", action="store_true", + help="show list of audio devices and exit") +args, remaining = parser.parse_known_args() +if args.list_devices: + print(sd.query_devices()) + parser.exit(0) +parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + parents=[parser]) +parser.add_argument( + "-f", "--filename", type=str, metavar="FILENAME", + help="audio file to store recording to") +parser.add_argument( + "-d", "--device", type=int_or_str, + help="input device (numeric ID or substring)") +parser.add_argument( + "-r", "--samplerate", type=int, help="sampling rate") +parser.add_argument( + "-m", "--model", type=str, help="language model; e.g. 
en-us, fr, nl; default is en-us") +args = parser.parse_args(remaining) + +try: + if args.samplerate is None: + device_info = sd.query_devices(args.device, "input") + # soundfile expects an int, sounddevice provides a float: + args.samplerate = int(device_info["default_samplerate"]) + + if args.model is None: + model = Model(lang="en-us") + else: + model = Model(lang=args.model) + + if args.filename: + dump_fn = open(args.filename, "wb") + else: + dump_fn = None + + with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device, + dtype="int16", channels=1, callback=callback): + print("#" * 80) + print("Press Ctrl+C to stop the recording") + print("#" * 80) + + rec = KaldiRecognizer(model, args.samplerate) + while True: + data = q.get() + if rec.AcceptWaveform(data): + print(rec.Result()) + else: + print(rec.PartialResult()) + if dump_fn is not None: + dump_fn.write(data) + +except KeyboardInterrupt: + print("\nDone") + parser.exit(0) +except Exception as e: + parser.exit(type(e).__name__ + ": " + str(e)) diff --git a/Lab 3/speech-scripts/whisper_try.py b/Lab 3/speech-scripts/whisper_try.py new file mode 100755 index 0000000000..5e8e36a678 --- /dev/null +++ b/Lab 3/speech-scripts/whisper_try.py @@ -0,0 +1,16 @@ + +import time + +start_time = time.perf_counter() + +import whisper + +model = whisper.load_model("tiny") +result = model.transcribe("lookdave.wav") + +print(result["text"]) + +end_time = time.perf_counter() + +elapsed_time = end_time - start_time +print(f"Program executed in {elapsed_time:.6f} seconds") diff --git a/README.md b/README.md index 7f60fa737e..571e0e7fd4 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ -# [Your name here]'s-Lab-Hub +# [Jesse Iriah]'s-Lab-Hub for [Interactive Device Design](https://github.com/FAR-Lab/Developing-and-Designing-Interactive-Devices/) Please place links here to the README.md's for each of your labs here: [Lab 1. Staging Interaction](Lab%201/) -Lab 2. Interactive Prototyping: The Clock of Pi +[Lab 2. Interactive Prototyping: The Clock of Pi](Lab%202/) -Lab 3. Chatterboxes +[Lab 3. Chatterboxes](Lab%203/) Lab 4. Ph-UI!!!