From 5149143f70ea855b59ea59c4c865503373372579 Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:08:21 +0800 Subject: [PATCH 1/9] <08.11.2025 20:05> --- .gitignore | 9 ++-- README.md | 15 +++++- app.py | 114 +++++++++++++++++++++++++++++++++++++---- docs/index.html | 32 ++++++++++++ model.py | 17 ++++-- rf_stress_model.joblib | Bin 0 -> 74705 bytes simple_demo.py | 15 ++++-- 7 files changed, 179 insertions(+), 23 deletions(-) create mode 100644 docs/index.html create mode 100644 rf_stress_model.joblib diff --git a/.gitignore b/.gitignore index f0c6272..78a2798 100644 --- a/.gitignore +++ b/.gitignore @@ -25,8 +25,8 @@ __pycache__/ *.pyo *.pyd *.pyclass -.env -.env.* +# .env +# .env.* venv/ venv*/ .venv/ @@ -78,4 +78,7 @@ coverage/ coverage.xml .cache/ *.bak -*.swp \ No newline at end of file +*.swp + + +REPOSITORY_DETAILS.md \ No newline at end of file diff --git a/README.md b/README.md index 0ae8d6c..31f50c3 100644 --- a/README.md +++ b/README.md @@ -61,6 +61,20 @@ If you don't have these components, use `simple_demo.py` to experiment quickly. Presentation and further info - The project presentation `MindGuard_An_Intelligent_Assistant_for_Student_Stress_Management_FinalProoposal.pptx` appears in the repository root. It likely contains project motivation, dataset descriptions, and proposed model architectures. If you want, I can extract its text and slides into a Markdown summary (I can do that automatically if you want me to install and run `python-pptx` locally). +Environment variables +``` +GOOGLE_API_KEY=your_google_api_key_here +``` + +If `GOOGLE_API_KEY` is set, the Streamlit app (`app.py`) will call the Google Generative AI (Gemini) to create personalized recommendations. If the key is not set, the app now provides a safe, local fallback set of recommendations so the UI still works without external APIs. + +Note about Gemini quotas and errors +- The Gemini API enforces quotas and rate limits. If your project exceeds the quota (or the API returns rate-limit errors), the application will now: + 1. show a concise warning in the UI informing you that the external AI is unavailable, and + 2. use a safe local fallback recommendation generator so users still receive helpful, non-medical advice. + +If you rely on Gemini for richer responses, monitor your Google Cloud quota and billing, or use a paid plan to increase rate limits. The app logs the full API error to the server console for debugging but intentionally avoids showing raw API errors in the UI. + Files added/changed in this update - `README.md` — this file (expanded with aim, goals, and instructions). - `data/StressLevelDataset.csv` — small synthetic sample dataset (so the EDA and demo can run). @@ -78,4 +92,3 @@ Presentation and further info Recommendations / result view: ![Recommendations view](assets/screenshots/recommendations_view.svg) ---- diff --git a/app.py b/app.py index 2e44a9c..aac6ace 100644 --- a/app.py +++ b/app.py @@ -4,6 +4,7 @@ import os from dotenv import load_dotenv import google.generativeai as genai +import joblib load_dotenv() @@ -50,8 +51,8 @@ def get_h2o_model(model_path): def get_gemini_recommendations(stress_level_text, input_data): api_key = os.getenv("GOOGLE_API_KEY") if not api_key: - st.error("Google API Key not found. 
Please ensure it is set in the .env file.") - return None + # No Google API key — provide a safe local fallback recommendation instead + return generate_local_recommendations(stress_level_text, input_data) try: genai.configure(api_key=api_key) model = genai.GenerativeModel(model_name='gemini-2.5-pro') @@ -90,18 +91,96 @@ def get_gemini_recommendations(stress_level_text, input_data): response = model.generate_content(prompt) return response.text except Exception as e: - st.error(f"An error occurred while calling Gemini AI: {e}") - return None + # Don't dump the raw API error to the user UI. Show a friendly message and fall back. + err_text = str(e) + # Detect common quota/rate-limit signals (simplified check) + if '429' in err_text or 'quota' in err_text.lower() or 'rate limit' in err_text.lower(): + st.warning("The Gemini API is temporarily unavailable due to quota or rate limits. Showing local recommendations instead.") + st.info("To enable richer AI-generated advice, add a valid `GOOGLE_API_KEY` with an appropriate quota/billing plan. See the README for details.") + else: + st.warning("The Gemini AI service returned an error. Showing local recommendations instead.") + + # Log the full error to the server console for debugging, but keep the UI message concise. + print("Gemini API error:", err_text) + + # Fall back to local recommendations + return generate_local_recommendations(stress_level_text, input_data) + + +def generate_local_recommendations(stress_level_text, input_data): + """Return a short, safe recommendation string when Gemini API is not available. + The content is intentionally generic and supportive (not medical advice). + """ + # Extract a few key values safely + try: + sleep_q = input_data.get('sleep_quality', [None])[0] + depression = input_data.get('depression', [None])[0] + academic = input_data.get('academic_performance', [None])[0] + teacher_rel = input_data.get('teacher_student_relationship', [None])[0] + except Exception: + sleep_q = depression = academic = teacher_rel = None + + support = ( + "It sounds like you are going through a challenging time — that's understandable. " + "You're taking a good step by checking in on your wellbeing." + ) + + bullets = [] + # Tailor bullets simply based on a few factors + if sleep_q is not None and sleep_q <= 2: + bullets.append("Try a 15-minute wind-down routine before bed to improve sleep quality.") + else: + bullets.append("Maintain regular sleep routines — consistent bed and wake times help a lot.") -MODEL_PATH = "XGBoost_1_AutoML_1_20251102_85004" -ml_model = get_h2o_model(MODEL_PATH) + if academic is not None and academic <= 2: + bullets.append("Break study tasks into 25-minute focused sessions with short breaks (Pomodoro).") + else: + bullets.append("Keep using structured study blocks and take short breaks to avoid burnout.") + + if depression is not None and depression >= 15: + bullets.append("If low mood or worry persists, consider contacting campus mental health services or a trusted professional.") + elif teacher_rel is not None and teacher_rel <= 2: + bullets.append("Try reaching out to a trusted instructor or peer to discuss academic concerns.") + else: + bullets.append("Reach out to friends or family and share how you feel — social support helps.") + + # Compose markdown-style text similar to Gemini output + md = f"**Support:** {support}\n\n**Recommendations:**\n" + for b in bullets: + md += f"- {b}\n" + + return md + + + +# Prefer a lightweight scikit-learn model (created by `simple_demo.py`) when present. 
+SKLEARN_MODEL_PATH = "rf_stress_model.joblib" +use_sklearn = False +sklearn_model = None +if os.path.exists(SKLEARN_MODEL_PATH): + try: + sklearn_model = joblib.load(SKLEARN_MODEL_PATH) + use_sklearn = True + print(f"Loaded scikit-learn model from '{SKLEARN_MODEL_PATH}'. Streamlit will use this model (no Java/H2O required).") + except Exception as e: + print(f"Failed to load scikit-learn model '{SKLEARN_MODEL_PATH}': {e}. Falling back to H2O if available.") + +ml_model = None +if not use_sklearn: + MODEL_PATH = "XGBoost_1_AutoML_1_20251102_85004" + ml_model = get_h2o_model(MODEL_PATH) st.title("🧠 MindGuard: Assess Your Stress Level") st.write("Answer a few questions to get an assessment of your current stress level and personalized recommendations.") -if ml_model: +# Proceed if either a scikit-learn model or the H2O model is available +if use_sklearn or ml_model: + if use_sklearn: + st.info("Using local scikit-learn model (rf_stress_model.joblib) for predictions.") + else: + st.info("Using H2O model for predictions.") st.subheader("Please rate the following factors:") col1, col2 = st.columns(2) with col1: @@ -130,11 +209,24 @@ def get_gemini_recommendations(stress_level_text, input_data): } input_df = pd.DataFrame(input_data) - h2o_input_frame = h2o.H2OFrame(input_df) - # 2. Make Prediction - prediction = ml_model.predict(h2o_input_frame) - predicted_level = prediction['predict'].as_data_frame().iloc[0, 0] + # 2. Make Prediction (scikit-learn fallback or H2O) + if use_sklearn and sklearn_model is not None: + try: + preds = sklearn_model.predict(input_df) + # sklearn returns integer labels (0/1/2) + predicted_level = float(preds[0]) + except Exception as e: + st.error(f"Failed to predict with scikit-learn model: {e}") + predicted_level = None + else: + if ml_model is None: + st.error("No ML model available (H2O model failed to load and no scikit-learn fallback found).") + predicted_level = None + else: + h2o_input_frame = h2o.H2OFrame(input_df) + prediction = ml_model.predict(h2o_input_frame) + predicted_level = prediction['predict'].as_data_frame().iloc[0, 0] stress_map = {0.0: "Low", 1.0: "Medium", 2.0: "High"} predicted_stress_text = stress_map.get(predicted_level, "Unknown") diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..96117e7 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,32 @@ + + + + + + + MindGuard Demo + + + +
+  <h1>MindGuard — Live demo (static)</h1>
+  <p>This static page is auto-generated from the repository demo model. It shows a sample input, the predicted stress level, and simple recommendations.</p>
+
+  <h2>Sample input</h2>
+  <pre>{'sleep_quality': 3, 'teacher_student_relationship': 4, 'blood_pressure': 2, 'future_career_concerns': 3, 'depression': 8, 'academic_performance': 4, 'social_support': 2, 'self_esteem': 18, 'safety': 2, 'headache': 1, 'anxiety_level': 12, 'mental_health_history': 0, 'breathing_problem': 2, 'noise_level': 2, 'living_conditions': 2, 'basic_needs': 2, 'study_load': 2, 'peer_pressure': 2, 'extracurricular_activities': 1, 'bullying': 0}</pre>
+
+  <h2>Predicted stress level</h2>
+  <p>1 (Medium)</p>
+
+  <h2>Recommendation</h2>
+  <p>You're assessed at MEDIUM stress. Try 15-minute wind-down routines before bed, Pomodoro study blocks (25/5), and discuss career concerns with a mentor.</p>
+ + diff --git a/model.py b/model.py index 3f6f748..1e63b51 100644 --- a/model.py +++ b/model.py @@ -10,11 +10,18 @@ # --- 2. Load and Initial Data Inspection --- print("--- Loading Data ---") -try: - df = pd.read_csv('StressLevelDataset.csv') - print("Dataset loaded successfully.") -except FileNotFoundError: - print("Error: 'StressLevelDataset.csv' not found. Ensure it is in the same directory.") +df = None +# Try the repository root first, then the data/ folder. +for path in ("StressLevelDataset.csv", "data/StressLevelDataset.csv"): + try: + df = pd.read_csv(path) + print(f"Dataset loaded successfully from '{path}'.") + break + except FileNotFoundError: + continue + +if df is None: + print("Error: 'StressLevelDataset.csv' not found. Ensure it is in the repository root or in the data/ folder.") exit() print("\n--- First 5 rows of the dataset: ---") diff --git a/rf_stress_model.joblib b/rf_stress_model.joblib new file mode 100644 index 0000000000000000000000000000000000000000..bd8613ae044022752546546945b7ec9d6152a366 GIT binary patch literal 74705 zcmeHQdyE~`eZJTG@IL(5wb#aAN-R>S-8hS}jq8Sx9mljLzGPF87}65=YVO@xp9}Zp z`|v|45d`tDI{hPMwoz17rD;`?w55MUkt$KEt*W-7X~b=mq^KZ7QzHsiRT_enDgk=t zp84+CbLP&@+i@^#_ZVSu0Ol<+@eM z6|L#)e5Gd98;-sG9xKD(Rdb$l_B`S|TA^XpXbHz2JXI(c9DAf*EfyLyljh{wO&Kkj z=dyEFwLzR&d$Lq0XX|FETD0ofc2Os@&YoDDOS?4hWYYEyH}kAjI7J%S`I?z;(8pxk z7mu<2r&9I?_uliC*=W*k)7x)y({7n;xiZ(z*-&c_M7G{A8x}3G&An@(RBh6hpQp{r z*DSMcIg4)D?!Nb6T46MwuV&3+m6(ePZ7fyFl}4pp$Y=9(xM%H&tT%?)Ggi$VGRGde zb^4d4_dCs;y`h}#jCS40%-Iv2WVUM7%u?OiO~%P)GC?NE7P6I0k?mv~*+H(QGdY^8 zR2ub0&8#|4+M|_9E?dvjS#>Vh!{zJ)l^pFh*_Qc5dp&({u0p$#N!ep(&05Lra;@Q9 zAlJ;=5X`{ zY?kOW(gAhst+Xk{LT{+Jow11di03x%ODop=#H1 z?Dge2(=AJ3Y7fx6q9!xZ z%so7`a5?SqYHqP3Iiey0qIL zGSFMwFPx-=-B>SLRyBLNNn6`E@7UKjEHh868lC3moZI%ARWx0W)JdW0*qd_2N@Xrv zrM#$9a!@kQH(i!w^Cl&8jrOISw`yg|@-fOyw^SRnQ)b?rvq}`#bgrl}DVekwy<5GK zFPOz_y;-eRC_L=(x>cNaQEgcz#~!Ym^OoDF5n`EhZnGSFqgg&zpqZ48XRIPsGVTD= zThJ242Fa2_o$4N1Wim$@*dT@SDcV?yA@_dc<%&_mGyznRVL8 zvSrP=U2w;MKB;2P(Fa%E{_&1qXPu+$$v11YLcUovYgt#7oS`>zRoqCfSuCEXcXuob zlIevlyCQ}j>q zR%W^CaNEn3jzdQ6@lKMqp8|}Gc&&Do!$z_xNI{UBT-~tk`25TNkzK4Ju34-i2vry# z%S;i2YL_ERjmP|=@gR4Jv=v5UeS0+cc(C5THRFwfvEX>W$ZKOE~7*EsFGJx#47nsA6oJG3gOQoktw92kt#|kn2A3k=fn$Skanq zWb>p@q-+?kwGF73TJLFYYE~MAlAcuDG9$2ul<4KQ@B4N zRT`9+rz+V-g_3o?;WXMEIV~f-+stNCk2|DB9?FbRS*LP>$_BYNvxCYemAj}sL-Ti2 zd6uU4QF+e2bMscA!-A@JF_Ss`xYNu~=w&ASd;*UKJ-AGIl8F(6$xAbsCzhTtP!IgW z^QEV+bleA?fXchK!+~MeQC7R<@fPw`FD*+v6_Sqw%aPM4C(E~H79R8RnmKYE*-8KH zQQv*@jyE%k`i>9AXa@#jOFR8B7C!D>A;-yY__P)I&CDRRp}9!fXG7;<^apUeaLBv= zV*QgQh0FkXK$=l*z^a|u9Kk(-Q{Y|X1N!r##D@t3{l46AfKm|@%g!dTs`jiTu1Ra zKRIsn*!Q3~jz32jmQI3SSJt0Xd%wLS!DlEwJ44_8f$`V9-mu#6iC(w7xC(ehJ|xD& zpH{}_{14vxg#@3W_!RXg?0ektDdQ11mGKB%%2I=UKfmL~THNuOqU&L4-+CC|f$Fjz z7S+Cr@i+HBgY9d&hxpl#17&^m-6Q`Kdwry^{<-w}ALpMLP;FULMU zQs5KtBJLk!9#(EYVVqAn{RRB2UOp}_pD_Nype#!8@mZ8fe}>i5;Ex>t(kq+d?vE(& z3HArPV)KKz%g-mQpQ1eM(~vN+6UO!bbejJEvb6t<`}S*JQU)Fq)t^FpSv`(7)%tq) z)bi{S_|cj<(f;rIo_x)_j$T9Ze#reoiSAMZpZ@v8mzr_slZO2{ar}XM|N0%usp2te zeejk&7wyuu|AF`uxJK_F!*Q$ZKi&6N7f&Ym3|$}T?!Wl)3H_;xuc-C$JcbyKmtcL{ z%l<6X!49d9SbTa?h7km6PUx&yzT zGM~Quu{&=|_!Mg2uQZ=F3Xnh?l<-LymQDgZg2V;R8{~*mgTMU6r=Pw#?tEHCe+2f6 z&5Le-#6SPa`g39USBBy~-x}7J;1{rY-DNPjR^s=Tv@Yt$AsY|;%dacr^WeMH*zYH3 zz^8b=7Jg}R{sxHbE;ZQ8^uXQc68#zKe|CQ#OgBEc-svx%f?pb^_4zlxcR23&O#C_e zF&*!F6_?++E?MvH_5E#rUKQ&=%wBN+y+3`@yFL=*hZyRkKX~`fYTWga27hisa9FVZ znZz5yuyhifXUg%$O)uSlD8Xl_KPT!*u0KWnrK&%<{!`Vz!RzB!*?#sTqX$xP$EU*n z5!Pw8baaFJ1Nt-i`ET)j9pdTLyIviYA0j`|uchK09s5U^46J8EA|I0Lah4O7*0BHn 
zH&b@<{flsaIq3Y7h^M&ed*Bl1m|8urgDU$Y&pnm?LfrNJviZx&_gR2oQF6VijL)gB zyd8V}so;-5KX#AjR_;8D_ecJd1pDhc{1J|Skrzn8KY>JkSUnB<2~Xd-^@h0X zdkuUN&;7xV)y*e9--ve+Ux?q=8(j78-{|2}%lG~am?6ryw7plOY@fdUuRr``-1(%z ze;O4g5V()~{v7ZO{~m*Y{v$0*eq9-#zx>IC>*J2kWPgtD&uy#TI~@1<)}W8NuiyFi zKfuhW>tCYZzyTt)KO?z6`yD-a+#AvK%?BVLj|;J6=e~9iIyRC;UIr zUIGVx{}XrvZe%}_&EqbE$<-T=Mdb(j%lZSqd?|kP63>AbkciJ@={-Hd_YP39yVMZR zUH;Ki<+$@ngT5EzDc~mV{-^AB0@pGcq30p_b!Gqavk$%=`}5x#^u4Ipu+I@)-;4Oh zeq^HW`QHlZ_V*^B*TD-&;2+l^kq;^JB`mGMf7kQ}8w@rd zetnO5ii%I*nbqU-F2@Dp_tr+%`1&#*CyX~F`+n1Fo%Lqq%i}BNBc*{)ADR1V?APx! z@JWo9F`p~PC;5H;<@sJ3`qi(!LENJ|UWR|v^7e0dJHZ6VrTK(Xa($jwjz`}6#L4-@ ze+u=V#CQVdZ~f<8Fk}7MCyvLc^p2?ubfv#9bhW*&5$q53vL6ONsO9{>a7D-!?V83Eo5J&m;HI%4RH}9@*&|* z`_sz$=-K1xPsAOc8rB;y@=Rhr{P7XW^@lIqH8jWpcwB-=7ypV^yu+UHt9Y z*uPJyLEnq}iP-n^`=6L;RX#SyweZLCM-|qkJ&mD-nzSn?HaX*ot zpM_p#_?R%@AM}SW;9lLI(SuL;ACO=lw@b(oCBLrhk36{k4Ku-K;Ce0elZZp?cl*y{ zB=~fZsu7=uUip)OxZ_iUzbtWGBfB%zzc<5~0^}UApQ+yu>{*xb{(D(ht zpTK>;@h8WJp7;~%BQieub7aKII90~y7x!L@{drdH_ymsP{{B4B2Ydpr$PdS!W>geLeaUekocdr;!hwLLy(o`zrf4 zKe;o(XDB|0`H<6Juo&lgfKqw>&o`&vo{_*o$x7?PcojtC*Yl>VRxeM2a0x&>vx$? zVma8y)ob7M@JZ$o_{8xKMu`&jAaOOUo(B7L`_~U7`}4v69&ikNdoyD&seAtuJc-Un zaes>AB^b~ByuSb(N5>^fD(m}=Upo-{_+G>MeNa3h;6gG_~YTh=-@)j z`;^d^khnV}=L5@;(;D`d{&&~gvxz?v>W_?JO4@Se{d@U5?eh5dz4Jrl7sqF?{#ILm z3gQ8#=Zc>X|G#IxkxTd#$|v!>AaJ8QUgmg)9YOri{d+fwe-9ps67~raS21r$%Tj}X zKk}iUCV!tY+`sScKk4@G`Fuz9pG5n>EcK8t{rX<)7mrW$=zH)35_(;hU<0JA$K^E2$x?%V|LNEE+>+?~ zK>rE65Z9mf8f+$X>wEZBt8M)rxC5?N5TEzI_{}@wj?Yz~@3&xz+cM~UG}o6={cBO* za@<^*`n>&XR^OMu{}rL z^B&_RO4NhI)v$UR;*o!R?T!y5_9@gp0pH%t7)wR1!R~+;SKEGeRQrnldWbcE+~u`TcnbIe ziTrRo#7g?hDf8)PZ~Xli;%=Wb?2m}+Wzz&a!q=d*wLosqcXQ$xHAzl6=hE`!OHijU$d{DOYj-0@5S#aVZTB4UsUxy);;^R7ree6 ze8T^L1p7FTkRwWdUD+S`<5M#qk2^k>@$Z$2>unsL!T3mL(wTuL9I~GrAP33Pf1iNQ zGv;4;zHc3VCfWZCUtjLN{}_FJxjQaoea7`kRC_7jm*eBg)aUIF#(OX4?_1Kjd+0@3 zf(?*-99WK=Mmbq(Sid{9`S{#ErSqSnXrJj574PB5Nz_#{C-(2pYtZ-I-xu-ke_&?%!xx`-;2!61#OF^e)BBX%-&SpZFh6?H_bpG) zLvJIEQHBhApOTluFDChTBVTzx@#UKaV}CwWL;ML`vvJ>NFsb`|J)htH@XXhHgYn#p bz6Z*HlhF+1bolOicejB^JJo literal 0 HcmV?d00001 diff --git a/simple_demo.py b/simple_demo.py index c6eeaaf..4a6edcf 100644 --- a/simple_demo.py +++ b/simple_demo.py @@ -23,9 +23,18 @@ def train_and_save(df, model_path='rf_stress_model.joblib'): X = df.drop(columns=['stress_level']) y = df['stress_level'] - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.33, random_state=42, stratify=y - ) + # Try a stratified split; if the dataset is very small this can fail + # because the requested test set would contain fewer samples than classes. 
+ try: + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.33, random_state=42, stratify=y + ) + except ValueError as e: + print("\nWarning: stratified train/test split failed:", e) + print("Falling back to a non-stratified split for this small dataset.") + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.33, random_state=42, stratify=None + ) clf = RandomForestClassifier(n_estimators=100, random_state=42) clf.fit(X_train, y_train) From ad935dae067b0d68781f0d8fb10756a65c3a87e3 Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:10:34 +0800 Subject: [PATCH 2/9] <08.11.2025 20:05> --- .github/workflows/publish.yml | 28 +++++++++++++++++++++++++--- .gitignore | 4 ++-- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 469b2a6..619f714 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -3,16 +3,27 @@ name: Build and publish static demo site on: push: branches: [ main ] + # Ignore pushes that come from this workflow committing generated artifacts + paths-ignore: + - 'docs/**' + - 'rf_stress_model.joblib' permissions: contents: write + pages: write + id-token: write jobs: build-and-publish: runs-on: ubuntu-latest + concurrency: + group: 'publish-docs' + cancel-in-progress: true steps: - name: Checkout repository uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v4 @@ -29,10 +40,21 @@ jobs: python simple_demo.py python generate_site.py - - name: Commit generated docs + - name: Commit generated docs and model artifact uses: EndBug/add-and-commit@v9 with: author_name: github-actions[bot] author_email: github-actions[bot]@users.noreply.github.com - message: "ci: generate site (docs)" - add: 'docs' + message: "ci: generate site (docs) and model" + add: 'docs rf_stress_model.joblib' + + - name: Configure Pages + uses: actions/configure-pages@v3 + + - name: Upload artifact for GitHub Pages + uses: actions/upload-pages-artifact@v1 + with: + path: docs + + - name: Deploy to GitHub Pages + uses: actions/deploy-pages@v1 diff --git a/.gitignore b/.gitignore index 78a2798..549752a 100644 --- a/.gitignore +++ b/.gitignore @@ -25,8 +25,8 @@ __pycache__/ *.pyo *.pyd *.pyclass -# .env -# .env.* +.env +.env.* venv/ venv*/ .venv/ From 0ce4326c26a89996d75d05cd1854157b05558de6 Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:14:54 +0800 Subject: [PATCH 3/9] <08.11.2025 20:05> --- .github/workflows/publish.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 619f714..fde1cc2 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,9 +1,15 @@ name: Build and publish static demo site on: + # Run on pushes to any branch so CI checks run before merge push: + # Ignore generated artifacts to avoid workflow retrigger loops + paths-ignore: + - 'docs/**' + - 'rf_stress_model.joblib' + # Also run the workflow on pull requests targeting main so you can see results before merging + pull_request: branches: [ main ] - # Ignore pushes that come from this workflow committing generated artifacts paths-ignore: - 'docs/**' - 'rf_stress_model.joblib' From 5c1491a18434edaefaf19b442540bdf98e730ccc Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:18:34 +0800 Subject: [PATCH 4/9] <08.11.2025 20:018> --- 
.github/workflows/publish.yml | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index fde1cc2..476ca71 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,8 +1,10 @@ name: Build and publish static demo site on: - # Run on pushes to any branch so CI checks run before merge + # Run on pushes to any branch (except the generated gh-pages branch) so CI checks run before merge push: + # Do not run on the generated gh-pages branch to avoid loops + branches-ignore: [ 'gh-pages' ] # Ignore generated artifacts to avoid workflow retrigger loops paths-ignore: - 'docs/**' @@ -46,21 +48,12 @@ jobs: python simple_demo.py python generate_site.py - - name: Commit generated docs and model artifact - uses: EndBug/add-and-commit@v9 + - name: Deploy site to gh-pages branch + uses: peaceiris/actions-gh-pages@v3 with: - author_name: github-actions[bot] - author_email: github-actions[bot]@users.noreply.github.com - message: "ci: generate site (docs) and model" - add: 'docs rf_stress_model.joblib' - - - name: Configure Pages - uses: actions/configure-pages@v3 - - - name: Upload artifact for GitHub Pages - uses: actions/upload-pages-artifact@v1 - with: - path: docs - - - name: Deploy to GitHub Pages - uses: actions/deploy-pages@v1 + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./docs + publish_branch: gh-pages + # keep repository history small by not committing other files + user_name: github-actions[bot] + user_email: github-actions[bot]@users.noreply.github.com From 52fbbf5e1c1874ad6b0614b0b1423bb0837b753e Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:24:58 +0800 Subject: [PATCH 5/9] <08.11.2025 20:24> --- .github/workflows/publish.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 476ca71..b200904 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -3,8 +3,9 @@ name: Build and publish static demo site on: # Run on pushes to any branch (except the generated gh-pages branch) so CI checks run before merge push: - # Do not run on the generated gh-pages branch to avoid loops - branches-ignore: [ 'gh-pages' ] + # Run on all branches except the generated gh-pages branch + branches-ignore: + - 'gh-pages' # Ignore generated artifacts to avoid workflow retrigger loops paths-ignore: - 'docs/**' @@ -25,7 +26,8 @@ jobs: build-and-publish: runs-on: ubuntu-latest concurrency: - group: 'publish-docs' + # Use a per-branch concurrency group so runs for different branches do not cancel each other + group: publish-docs-${{ github.ref_name }} cancel-in-progress: true steps: - name: Checkout repository From 7c27ef6e9eb7f331ead4ba11e6e88eeb5fa20918 Mon Sep 17 00:00:00 2001 From: Kuranov Grigory <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 20:29:12 +0800 Subject: [PATCH 6/9] Update README.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 31f50c3..75f04b9 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ If you don't have these components, use `simple_demo.py` to experiment quickly. Presentation and further info - The project presentation `MindGuard_An_Intelligent_Assistant_for_Student_Stress_Management_FinalProoposal.pptx` appears in the repository root. 
It likely contains project motivation, dataset descriptions, and proposed model architectures. If you want, I can extract its text and slides into a Markdown summary (I can do that automatically if you want me to install and run `python-pptx` locally). -Environment variables +## Environment variables ``` GOOGLE_API_KEY=your_google_api_key_here ``` From 346732d6a472f8cbb8d5cde99b18bcc786ed46d0 Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sat, 8 Nov 2025 21:23:42 +0800 Subject: [PATCH 7/9] <08.11.2025 21:23> --- .dockerignore | 9 +++++++ .github/workflows/docker-publish.yml | 36 ++++++++++++++++++++++++++++ Dockerfile | 26 ++++++++++++++++++++ README.md | 27 +++++++++++++++++++++ docker-compose.yml | 13 ++++++++++ 5 files changed, 111 insertions(+) create mode 100644 .dockerignore create mode 100644 .github/workflows/docker-publish.yml create mode 100644 Dockerfile create mode 100644 docker-compose.yml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..6556c5f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +.venv +__pycache__ +*.pyc +.pytest_cache +.git +docs +data/*.csv +.env +rf_stress_model.joblib diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml new file mode 100644 index 0000000..2d35b7d --- /dev/null +++ b/.github/workflows/docker-publish.yml @@ -0,0 +1,36 @@ +name: Build and publish Docker image + +on: + push: + branches: [ main ] + +permissions: + contents: read + packages: write + +jobs: + build-and-push: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push image + uses: docker/build-push-action@v4 + with: + push: true + tags: ghcr.io/${{ github.repository_owner }}/mindguard:latest + file: ./Dockerfile diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..3d6ba42 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.10-slim + +# Prevent Python from writing .pyc files and buffering stdout/stderr +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +WORKDIR /app + +# Install system deps (minimal) and Python deps +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy project +COPY . /app + +# Expose Streamlit default port +EXPOSE 8501 + +ENV STREAMLIT_SERVER_HEADLESS=true + +# Run the Streamlit app +CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"] diff --git a/README.md b/README.md index 75f04b9..cf42f34 100644 --- a/README.md +++ b/README.md @@ -92,3 +92,30 @@ If you rely on Gemini for richer responses, monitor your Google Cloud quota and Recommendations / result view: ![Recommendations view](assets/screenshots/recommendations_view.svg) + +Docker / container deployment +-------------------------------- +I added a Dockerfile and docker-compose configuration so you can run the Streamlit app in a container or publish the image to a container registry. + +Run locally with Docker Compose + +1. Build and run: +```bash +docker compose up --build +``` +2. 
Open the app at: http://localhost:8501 + +Notes: +- The container reads `GOOGLE_API_KEY` from the environment. You can provide it with `export GOOGLE_API_KEY=...` before running `docker compose up`, or create a `.env` file with that variable (do not commit secrets). +- The image exposes port 8501. + +Publish image automatically (GitHub) + +- I added a GitHub Actions workflow `.github/workflows/docker-publish.yml`. On push to `main` it builds the image and pushes it to GitHub Container Registry (GHCR) as `ghcr.io//mindguard:latest`. +- After pushing, you can deploy that container image to any hosting provider that accepts Docker images (Render, Fly.io, Railway, DigitalOcean App Platform, etc.). + +Limitations + +- GitHub itself cannot host running Docker containers behind a public URL; you must deploy the image to a hosting provider or run `docker compose` on a server you control. +- If you want a single-click visitable URL from the repo (no manual server), I can add a recommended provider configuration (for example a Render Docker service) and a GitHub Action that automatically deploys to that provider (requires provider credentials / API key). + diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..3d73b29 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,13 @@ +services: + mindguard: + build: . + image: mindguard:latest + ports: + - "8501:8501" + volumes: + # Mount current repo (read-only) so edits are visible during development + - ./:/app:ro + environment: + # Forward GOOGLE_API_KEY from host or .env + - GOOGLE_API_KEY=${GOOGLE_API_KEY} + restart: unless-stopped From 45f657977ec9581581b330a09274e96267bd199b Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Sun, 9 Nov 2025 23:06:17 +0800 Subject: [PATCH 8/9] <09.11.2025 21:23> --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index cf42f34..4161953 100644 --- a/README.md +++ b/README.md @@ -112,10 +112,5 @@ Notes: Publish image automatically (GitHub) - I added a GitHub Actions workflow `.github/workflows/docker-publish.yml`. On push to `main` it builds the image and pushes it to GitHub Container Registry (GHCR) as `ghcr.io//mindguard:latest`. -- After pushing, you can deploy that container image to any hosting provider that accepts Docker images (Render, Fly.io, Railway, DigitalOcean App Platform, etc.). -Limitations - -- GitHub itself cannot host running Docker containers behind a public URL; you must deploy the image to a hosting provider or run `docker compose` on a server you control. -- If you want a single-click visitable URL from the repo (no manual server), I can add a recommended provider configuration (for example a Render Docker service) and a GitHub Action that automatically deploys to that provider (requires provider credentials / API key). 
From acd6302ea406200b779ec3f1290dd1bd666ea412 Mon Sep 17 00:00:00 2001 From: gr1shan1a <368409@edu.itmo.ru> Date: Mon, 10 Nov 2025 02:42:14 +0800 Subject: [PATCH 9/9] <10.11.2025 02:42> --- .github/workflows/docker-publish.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 2d35b7d..b6ba187 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -28,9 +28,16 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Compute lowercase owner + id: owner_lower + run: | + # GitHub repository_owner can contain uppercase letters; container registries require lowercase + owner=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + echo "owner=$owner" >> "$GITHUB_OUTPUT" + - name: Build and push image uses: docker/build-push-action@v4 with: push: true - tags: ghcr.io/${{ github.repository_owner }}/mindguard:latest + tags: ghcr.io/${{ steps.owner_lower.outputs.owner }}/mindguard:latest file: ./Dockerfile