diff --git a/.editorconfig b/.editorconfig index 11b1d231..3d74d05a 100644 --- a/.editorconfig +++ b/.editorconfig @@ -34,5 +34,5 @@ indent_size = 2 max_line_length = 79 -[{COMMIT_EDITMSG,MERGE_MSG,SQUASH_MSG,git-rebase-todo}] +[{COMMIT_EDITMSG,MERGE_MSG,SQUASH_MSG,TAG_EDITMSG,git-rebase-todo}] max_line_length = 72 diff --git a/.geminiignore b/.geminiignore new file mode 100644 index 00000000..d9de5552 --- /dev/null +++ b/.geminiignore @@ -0,0 +1,6 @@ +.venv +.pytest_cache +__pycache__ +*.sql +*.db + diff --git a/.github/workflows/install-linux.yml b/.github/workflows/install-linux.yml index de6fa69b..05791691 100644 --- a/.github/workflows/install-linux.yml +++ b/.github/workflows/install-linux.yml @@ -15,16 +15,16 @@ jobs: strategy: matrix: - python-version: ["3.10"] + python-version: ["3.11", "3.5"] steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} # update-environment: false @@ -35,7 +35,7 @@ jobs: # NOTE: see above NOTE, we are still using deprecated cache restore - name: Reload Cache / pip - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.cache/pip # NOTE: only cares about base requirements.txt @@ -52,6 +52,6 @@ jobs: - name: Basic Tests / CLI / Integration run: | - n -v - nutra -d recipe init -f + n --version + nutra --debug recipe init -f nutra --no-pager recipe diff --git a/.github/workflows/install-win32.yml b/.github/workflows/install-win32.yml index 59b0a6a8..f0b2d107 100644 --- a/.github/workflows/install-win32.yml +++ b/.github/workflows/install-win32.yml @@ -17,18 +17,18 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} # update-environment: false - name: Reload Cache / pip - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~\AppData\Local\pip\Cache # NOTE: only cares about base requirements.txt @@ -43,8 +43,8 @@ jobs: - name: Basic Tests / CLI / Integration run: | - n -v - nutra -d init -y + n --version + nutra --debug init -y nutra --no-pager nt nutra --no-pager sort -c 789 nutra --no-pager search ultraviolet mushrooms diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3cc28231..35e22aa8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive @@ -24,7 +24,7 @@ jobs: run: git fetch origin master - name: Reload Cache / pip - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3 cache: "pip" # caching pip dependencies diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9372dd23..0facea6f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.pylintrc b/.pylintrc index a92217f0..565785d2 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,6 +1,6 @@ [MASTER] -fail-under=9.93 +fail-under=9.95 [MESSAGES CONTROL] diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 23c6b483..eae36500 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ 
-12,6 +12,18 @@ and this project adheres to `Semantic Versioning , valid commands:\n\n" - @grep "##" $(MAKEFILE_LIST) | grep -v IGNORE_ME | sed -e 's/##//' | column -t -s $$'\t' + @grep "##" $(MAKEFILE_LIST) | grep -v ^# | grep -v IGNORE_ME | sed -e 's/##//' | column -t -s $$'\t' @@ -16,11 +16,15 @@ _help: .PHONY: init init: ## Set up a Python virtual environment + # Fetch submodule git submodule update --init + # Re-add virtual environment rm -rf .venv ${PY_SYS_INTERPRETER} -m venv .venv + # Upgrade dependencies and pip, if NOT running in CI automation - if [ -z "${CI}" ]; then ${PY_SYS_INTERPRETER} -m venv --upgrade-deps .venv; fi direnv allow + @echo "INFO: Successfully initialized venv, run 'make deps' now!" # include .env SKIP_VENV ?= @@ -29,7 +33,8 @@ PWD ?= $(shell pwd) .PHONY: _venv _venv: # Test to enforce venv usage across important make targets - [ "${SKIP_VENV}" ] || [ "${PYTHON}" = "${PWD}/.venv/bin/python" ] + test "${SKIP_VENV}" || test "${PYTHON}" = "${PWD}/.venv/bin/python" + @echo "OK" @@ -54,10 +59,12 @@ REQ_LINT := requirements-lint.txt REQ_TEST := requirements-test.txt REQ_TEST_OLD := requirements-test-old.txt +# TODO: this is a fragile hack (to get it to work in CI and locally too) PIP_OPT_ARGS ?= $(shell if [ "$(SKIP_VENV)" ]; then echo "--user"; fi) .PHONY: deps deps: _venv ## Install requirements + # Install requirements ${PIP} install wheel ${PIP} install ${PIP_OPT_ARGS} -r requirements.txt - ${PIP} install ${PIP_OPT_ARGS} -r ${REQ_OPT} @@ -69,42 +76,65 @@ deps: _venv ## Install requirements # Format, lint, test # --------------------------------------- -.PHONY: format -format: _venv ## Format with isort & black - if [ "${CHANGED_FILES_PY_FLAG}" ]; then isort ${CHANGED_FILES_PY} ; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then black ${CHANGED_FILES_PY} ; fi - - -LINT_LOCS := ntclient/ tests/ setup.py +# LINT_LOCS := ntclient/ tests/ setup.py CHANGED_FILES_RST ?= $(shell git diff origin/master --name-only --diff-filter=MACRU \*.rst) CHANGED_FILES_PY ?= $(shell git diff origin/master --name-only --diff-filter=MACRU \*.py) -CHANGED_FILES_PY_FLAG ?= $(shell if [ "$(CHANGED_FILES_PY)" ]; then echo 1; fi) + +.PHONY: format +format: _venv ## Format with isort & black +ifneq ($(CHANGED_FILES_PY),) + isort ${CHANGED_FILES_PY} + black ${CHANGED_FILES_PY} + -git --no-pager diff --stat + @echo "OK" + @git diff --quiet || echo "NOTE: You may want to run: git add ." +else + $(info No changed Python files, skipping.) +endif .PHONY: lint lint: _venv ## Lint code and documentation +ifneq ($(CHANGED_FILES_RST),) # lint RST - if [ "${CHANGED_FILES_RST}" ]; then doc8 --quiet ${CHANGED_FILES_RST}; fi + doc8 --quiet ${CHANGED_FILES_RST} + @echo "OK" +else + $(info No changed RST files, skipping.) 
+endif +ifneq ($(CHANGED_FILES_PY),) # check formatting: Python - if [ "${CHANGED_FILES_PY_FLAG}" ]; then isort --diff --check ${CHANGED_FILES_PY} ; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then black --check ${CHANGED_FILES_PY} ; fi + isort --diff --check ${CHANGED_FILES_PY} + black --check ${CHANGED_FILES_PY} # lint Python - if [ "${CHANGED_FILES_PY_FLAG}" ]; then pycodestyle --statistics ${CHANGED_FILES_PY}; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then bandit -q -c .banditrc -r ${CHANGED_FILES_PY}; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then flake8 ${CHANGED_FILES_PY}; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then mypy ${CHANGED_FILES_PY}; fi - if [ "${CHANGED_FILES_PY_FLAG}" ]; then pylint ${CHANGED_FILES_PY}; fi + pycodestyle --statistics ${CHANGED_FILES_PY} + bandit -q -c .banditrc -r ${CHANGED_FILES_PY} + flake8 ${CHANGED_FILES_PY} + mypy ${CHANGED_FILES_PY} + pylint ${CHANGED_FILES_PY} + @echo "OK" +else + $(info No changed Python files, skipping.) +endif .PHONY: pylint pylint: - if [ "${CHANGED_FILES_PY_FLAG}" ]; then pylint ${CHANGED_FILES_PY}; fi +ifneq ($(CHANGED_FILES_PY),) + pylint ${CHANGED_FILES_PY} +else + $(info No changed Python files, skipping.) +endif .PHONY: mypy mypy: - if [ "${CHANGED_FILES_PY_FLAG}" ]; then mypy ${CHANGED_FILES_PY}; fi +ifneq ($(CHANGED_FILES_PY),) + mypy ${CHANGED_FILES_PY} +else + $(info No changed Python files, skipping.) +endif .PHONY: test -test: _venv ## Run CLI unittests +test: _venv ## Run CLI unit tests coverage run coverage report - grep fail_under setup.cfg @@ -144,7 +174,7 @@ install: ## pip install . ${PY_SYS_INTERPRETER} -m pip install . || ${PY_SYS_INTERPRETER} -m pip install --user . ${PY_SYS_INTERPRETER} -m pip show nutra - ${PY_SYS_INTERPRETER} -c 'import shutil; print(shutil.which("nutra"));' - nutra -v + nutra --version @@ -156,7 +186,8 @@ RECURSIVE_CLEAN_LOCS ?= $(shell find ntclient/ tests/ \ -name __pycache__ \ -o -name .coverage \ -o -name .mypy_cache \ --o -name .pytest_cache) +-o -name .pytest_cache \ +) .PHONY: clean clean: ## Clean up __pycache__ and leftover bits @@ -164,8 +195,10 @@ clean: ## Clean up __pycache__ and leftover bits rm -rf build/ rm -rf nutra.egg-info/ rm -rf .pytest_cache/ .mypy_cache/ +ifneq ($(RECURSIVE_CLEAN_LOCS),) # Recursively find & remove - if [ "${RECURSIVE_CLEAN_LOCS}" ]; then rm -rf ${RECURSIVE_CLEAN_LOCS}; fi + rm -rf ${RECURSIVE_CLEAN_LOCS} +endif @@ -175,4 +208,4 @@ clean: ## Clean up __pycache__ and leftover bits .PHONY: extras/cloc extras/cloc: ## Count lines of source code - - cloc HEAD + - cloc HEAD ntclient/ntsqlite diff --git a/README.rst b/README.rst index 84d70e6b..2da309a3 100644 --- a/README.rst +++ b/README.rst @@ -100,7 +100,7 @@ Install with, HOOK='eval "$(direnv hook '$DEFAULT_SHELL')"' # Install the hook, if not already - grep "$HOOK" $SHELL_RC_FILE || echo "$HOOK" >>$SHELL_RC_FILE + grep ^"$HOOK"$ $SHELL_RC_FILE || echo "$HOOK" >>$SHELL_RC_FILE source $SHELL_RC_FILE This is what the ``.envrc`` file is for. It automatically activates ``venv``. @@ -269,4 +269,5 @@ Usage Requires internet connection to download initial datasets. Run ``nutra init`` for this step. -Run ``n`` or ``nutra`` to output usage (``-h`` flag is optional and defaulted). +Run ``n`` or ``nutra`` to output usage (``--help`` flag is optional and +defaulted). 
diff --git a/ntclient/__init__.py b/ntclient/__init__.py index 1bbce431..1b973b85 100644 --- a/ntclient/__init__.py +++ b/ntclient/__init__.py @@ -16,7 +16,7 @@ # Package info __title__ = "nutra" -__version__ = "0.2.7" +__version__ = "0.2.8.dev2" __author__ = "Shane Jaroch" __email__ = "chown_tee@proton.me" __license__ = "GPL v3" @@ -24,30 +24,45 @@ __url__ = "https://github.com/nutratech/cli" # Sqlite target versions -__db_target_nt__ = "0.0.6" -__db_target_usda__ = "0.0.8" +# TODO: should this be via versions.csv file? Don't update in two places? +__db_target_nt__ = "0.0.7" +__db_target_usda__ = "0.0.10" USDA_XZ_SHA256 = "25dba8428ced42d646bec704981d3a95dc7943240254e884aad37d59eee9616a" # Global variables PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) -NUTRA_HOME = os.getenv("NUTRA_HOME", os.path.join(os.path.expanduser("~"), ".nutra")) -USDA_DB_NAME = "usda.sqlite" +NUTRA_HOME = os.getenv( + "NUTRA_HOME", + os.getenv("NUTRA_DIR", os.path.join(os.path.expanduser("~"), ".nutra")), +) +USDA_DB_NAME = "usda.sqlite3" # NOTE: NT_DB_NAME = "nt.sqlite3" is defined in ntclient.ntsqlite.sql NTSQLITE_BUILDPATH = os.path.join(PROJECT_ROOT, "ntsqlite", "sql", NT_DB_NAME) NTSQLITE_DESTINATION = os.path.join(NUTRA_HOME, NT_DB_NAME) -# Check Python version + +def version_check() -> None: + """Check Python version""" + # pylint: disable=global-statement + global PY_SYS_VER, PY_SYS_STR + PY_SYS_VER = sys.version_info[0:3] + PY_SYS_STR = ".".join(str(x) for x in PY_SYS_VER) + + if PY_SYS_VER < PY_MIN_VER: + # TODO: make this testable with: `class CliConfig`? + raise RuntimeError( + "ERROR: %s requires Python %s or later to run" % (__title__, PY_MIN_STR), + "HINT: You're running Python %s" % PY_SYS_STR, + ) + + PY_MIN_VER = (3, 4, 3) PY_SYS_VER = sys.version_info[0:3] PY_MIN_STR = ".".join(str(x) for x in PY_MIN_VER) PY_SYS_STR = ".".join(str(x) for x in PY_SYS_VER) -if PY_SYS_VER < PY_MIN_VER: - # TODO: make this testable with: `class CliConfig`? 
- raise RuntimeError( # pragma: no cover - "ERROR: %s requires Python %s or later to run" % (__title__, PY_MIN_STR), - "HINT: You're running Python %s" % PY_SYS_STR, - ) +# Run the check +version_check() # Console size, don't print more than it BUFFER_WD = shutil.get_terminal_size()[0] diff --git a/ntclient/__main__.py b/ntclient/__main__.py index ccde9616..54353d01 100644 --- a/ntclient/__main__.py +++ b/ntclient/__main__.py @@ -25,6 +25,7 @@ from ntclient.argparser import build_subcommands from ntclient.utils import CLI_CONFIG from ntclient.utils.exceptions import SqlException +from ntclient.utils.sql import handle_runtime_exception def build_arg_parser() -> argparse.ArgumentParser: @@ -32,7 +33,6 @@ def build_arg_parser() -> argparse.ArgumentParser: arg_parser = argparse.ArgumentParser(prog=__title__) arg_parser.add_argument( - "-v", "--version", action="version", version="{0} cli version {1} ".format(__title__, __version__) @@ -40,7 +40,7 @@ def build_arg_parser() -> argparse.ArgumentParser: ) arg_parser.add_argument( - "-d", "--debug", action="store_true", help="enable detailed error messages" + "--debug", action="store_true", help="enable detailed error messages" ) arg_parser.add_argument( "--no-pager", action="store_true", help="disable paging (print full output)" @@ -102,23 +102,21 @@ def func(parser: argparse.Namespace) -> tuple: exit_code, *_results = func(_parser) except SqlException as sql_exception: # pragma: no cover print("Issue with an sqlite database: " + repr(sql_exception)) - if CLI_CONFIG.debug: - raise + handle_runtime_exception(args, sql_exception) except HTTPError as http_error: # pragma: no cover err_msg = "{0}: {1}".format(http_error.code, repr(http_error)) print("Server response error, try again: " + err_msg) - if CLI_CONFIG.debug: - raise + handle_runtime_exception(args, http_error) except URLError as url_error: # pragma: no cover print("Connection error, check your internet: " + repr(url_error.reason)) - if CLI_CONFIG.debug: - raise + handle_runtime_exception(args, url_error) except Exception as exception: # pylint: disable=broad-except # pragma: no cover - print("Unforeseen error, run with -d for more info: " + repr(exception)) + print("Unforeseen error, run with --debug for more info: " + repr(exception)) print("You can open an issue here: %s" % __url__) print("Or send me an email with the debug output: %s" % __email__) - if CLI_CONFIG.debug: - raise + print("Or, run the bug report command.") + print() + handle_runtime_exception(args, exception) finally: if CLI_CONFIG.debug: exc_time = time.time() - start_time @@ -126,3 +124,9 @@ def func(parser: argparse.Namespace) -> tuple: print("Exit code: %s" % exit_code) return exit_code + + +if __name__ == "__main__": + import sys + + sys.exit(main()) diff --git a/ntclient/argparser/__init__.py b/ntclient/argparser/__init__.py index b2f87413..7a393091 100644 --- a/ntclient/argparser/__init__.py +++ b/ntclient/argparser/__init__.py @@ -15,21 +15,23 @@ def build_subcommands(subparsers: argparse._SubParsersAction) -> None: """Attaches subcommands to main parser""" - build_init_subcommand(subparsers) - build_nt_subcommand(subparsers) - build_search_subcommand(subparsers) - build_sort_subcommand(subparsers) - build_analyze_subcommand(subparsers) - build_day_subcommand(subparsers) - build_recipe_subcommand(subparsers) - build_calc_subcommand(subparsers) + build_subcommand_init(subparsers) + build_subcommand_nt(subparsers) + build_subcommand_search(subparsers) + build_subcommand_sort(subparsers) + 
build_subcommand_analyze(subparsers) + build_subcommand_day(subparsers) + build_subcommand_log(subparsers) + build_subcommand_recipe(subparsers) + build_subcommand_calc(subparsers) + build_subcommand_bug(subparsers) ################################################################################ # Methods to build subparsers, and attach back to main arg_parser ################################################################################ # noinspection PyUnresolvedReferences,PyProtectedMember -def build_init_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_init(subparsers: argparse._SubParsersAction) -> None: """Self running init command""" init_parser = subparsers.add_parser( @@ -45,7 +47,7 @@ def build_init_subcommand(subparsers: argparse._SubParsersAction) -> None: # noinspection PyUnresolvedReferences,PyProtectedMember -def build_nt_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_nt(subparsers: argparse._SubParsersAction) -> None: """Lists out nutrients details with computed totals and averages""" nutrient_parser = subparsers.add_parser( @@ -55,7 +57,7 @@ def build_nt_subcommand(subparsers: argparse._SubParsersAction) -> None: # noinspection PyUnresolvedReferences,PyProtectedMember -def build_search_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_search(subparsers: argparse._SubParsersAction) -> None: """Search: terms [terms ... ]""" search_parser = subparsers.add_parser( @@ -83,7 +85,7 @@ def build_search_subcommand(subparsers: argparse._SubParsersAction) -> None: # noinspection PyUnresolvedReferences,PyProtectedMember -def build_sort_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_sort(subparsers: argparse._SubParsersAction) -> None: """Sort foods ranked by nutr_id, per 100g or 200kcal""" sort_parser = subparsers.add_parser("sort", help="sort foods by nutrient ID") @@ -106,10 +108,12 @@ def build_sort_subcommand(subparsers: argparse._SubParsersAction) -> None: # noinspection PyUnresolvedReferences,PyProtectedMember -def build_analyze_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_analyze(subparsers: argparse._SubParsersAction) -> None: """Analyzes (foods only for now)""" - analyze_parser = subparsers.add_parser("anl", help="analyze food(s)") + analyze_parser = subparsers.add_parser( + "anl", help="analyze food(s), recipe(s), or day(s)" + ) analyze_parser.add_argument( "-g", @@ -117,12 +121,26 @@ def build_analyze_subcommand(subparsers: argparse._SubParsersAction) -> None: type=float, help="scale to custom number of grams (default is 100g)", ) + analyze_parser.add_argument( + "-s", + dest="scale", + metavar="N", + type=float, + help="scale actual values to N (default: kcal)", + ) + analyze_parser.add_argument( + "-m", + dest="scale_mode", + metavar="MODE", + type=str, + help="scale mode: 'kcal', 'weight', or nutrient name/ID", + ) analyze_parser.add_argument("food_id", type=int, nargs="+") analyze_parser.set_defaults(func=parser_funcs.analyze) # noinspection PyUnresolvedReferences,PyProtectedMember -def build_day_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_day(subparsers: argparse._SubParsersAction) -> None: """Analyzes a DAY.csv, uses new colored progress bar spec""" day_parser = subparsers.add_parser( @@ -142,11 +160,25 @@ def build_day_subcommand(subparsers: argparse._SubParsersAction) -> None: type=types.file_path, help="provide a custom RDA file in csv format", ) + 
day_parser.add_argument( + "-s", + dest="scale", + metavar="N", + type=float, + help="scale actual values to N (default: kcal)", + ) + day_parser.add_argument( + "-m", + dest="scale_mode", + metavar="MODE", + type=str, + help="scale mode: 'kcal', 'weight', or nutrient name/ID", + ) day_parser.set_defaults(func=parser_funcs.day) # noinspection PyUnresolvedReferences,PyProtectedMember -def build_recipe_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_recipe(subparsers: argparse._SubParsersAction) -> None: """View, add, edit, delete recipes""" recipe_parser = subparsers.add_parser("recipe", help="list and analyze recipes") @@ -179,11 +211,25 @@ def build_recipe_subcommand(subparsers: argparse._SubParsersAction) -> None: recipe_anl_parser.add_argument( "path", type=str, help="view (and analyze) recipe by file path" ) + recipe_anl_parser.add_argument( + "-s", + dest="scale", + metavar="N", + type=float, + help="scale actual values to N (default: kcal)", + ) + recipe_anl_parser.add_argument( + "-m", + dest="scale_mode", + metavar="MODE", + type=str, + help="scale mode: 'kcal', 'weight', or nutrient name/ID", + ) recipe_anl_parser.set_defaults(func=parser_funcs.recipe) # noinspection PyUnresolvedReferences,PyProtectedMember -def build_calc_subcommand(subparsers: argparse._SubParsersAction) -> None: +def build_subcommand_calc(subparsers: argparse._SubParsersAction) -> None: """BMR, 1 rep-max, and other calculators""" calc_parser = subparsers.add_parser( @@ -317,3 +363,55 @@ def build_calc_subcommand(subparsers: argparse._SubParsersAction) -> None: "ankle", type=float, nargs="?", help="ankle (cm) [casey_butt]" ) calc_lbl_parser.set_defaults(func=parser_funcs.calc_lbm_limits) + + +# noinspection PyUnresolvedReferences,PyProtectedMember +def build_subcommand_bug(subparsers: argparse._SubParsersAction) -> None: + """List and report bugs""" + + bug_parser = subparsers.add_parser("bug", help="report bugs") + bug_subparser = bug_parser.add_subparsers(title="bug subcommands") + bug_parser.add_argument( + "--show", action="store_true", help="show list of unsubmitted bugs" + ) + bug_parser.set_defaults(func=parser_funcs.bugs_list) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Simulate (bug) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + bug_simulate_parser = bug_subparser.add_parser( + "simulate", help="simulate a bug (for testing purposes)" + ) + bug_simulate_parser.set_defaults(func=parser_funcs.bug_simulate) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Report (bug) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + bug_report_parser = bug_subparser.add_parser( + "report", help="submit/report all bugs" + ) + bug_report_parser.set_defaults(func=parser_funcs.bugs_report) + + +# noinspection PyUnresolvedReferences,PyProtectedMember +def build_subcommand_log(subparsers: argparse._SubParsersAction) -> None: + """Log management: add, view, analyze""" + log_parser = subparsers.add_parser("log", help="manage daily food logs") + log_subparsers = log_parser.add_subparsers(dest="subcommand", required=True) + + # ADD + add_parser = log_subparsers.add_parser("add", help="add food to log") + add_parser.add_argument("food_id", type=int, help="food ID") + add_parser.add_argument("grams", type=float, help="amount in grams") + add_parser.add_argument("-d", "--date", help="date YYYY-MM-DD (default: today)") + add_parser.set_defaults(func=parser_funcs.log_add) + + # VIEW + view_parser = log_subparsers.add_parser("view", help="view log entries") + 
view_parser.add_argument("-d", "--date", help="date YYYY-MM-DD (default: today)") + view_parser.set_defaults(func=parser_funcs.log_view) + + # ANALYZE + anl_parser = log_subparsers.add_parser("anl", help="analyze log") + anl_parser.add_argument("-d", "--date", help="date YYYY-MM-DD (default: today)") + anl_parser.set_defaults(func=parser_funcs.log_analyze) diff --git a/ntclient/argparser/funcs.py b/ntclient/argparser/funcs.py index aa177de0..2d591afe 100644 --- a/ntclient/argparser/funcs.py +++ b/ntclient/argparser/funcs.py @@ -14,7 +14,9 @@ from tabulate import tabulate import ntclient.services.analyze -import ntclient.services.recipe.utils +import ntclient.services.bugs +import ntclient.services.logs +import ntclient.services.recipe.recipe import ntclient.services.usda from ntclient.services import calculate as calc from ntclient.utils import CLI_CONFIG, Gender, activity_factor_from_index @@ -58,18 +60,24 @@ def analyze(args: argparse.Namespace) -> tuple: """Analyze a food""" # exc: ValueError, food_ids = set(args.food_id) - grams = float(args.grams) if args.grams else 0.0 + grams = float(args.grams) if args.grams else 100.0 + scale = float(args.scale) if args.scale else 0.0 + scale_mode = args.scale_mode if args.scale_mode else "kcal" - return ntclient.services.analyze.foods_analyze(food_ids, grams) + return ntclient.services.analyze.foods_analyze( + food_ids, grams, scale=scale, scale_mode=scale_mode + ) def day(args: argparse.Namespace) -> tuple: """Analyze a day's worth of meals""" day_csv_paths = [str(os.path.expanduser(x)) for x in args.food_log] rda_csv_path = str(os.path.expanduser(args.rda)) if args.rda else str() + scale = float(args.scale) if args.scale else 0.0 + scale_mode = args.scale_mode if args.scale_mode else "kcal" return ntclient.services.analyze.day_analyze( - day_csv_paths, rda_csv_path=rda_csv_path + day_csv_paths, rda_csv_path=rda_csv_path, scale=scale, scale_mode=scale_mode ) @@ -80,12 +88,12 @@ def recipes_init(args: argparse.Namespace) -> tuple: """Copy example/stock data into RECIPE_HOME""" _force = args.force - return ntclient.services.recipe.utils.recipes_init(_force=_force) + return ntclient.services.recipe.recipe.recipes_init(_force=_force) def recipes() -> tuple: """Show all, in tree or detail view""" - return ntclient.services.recipe.utils.recipes_overview() + return ntclient.services.recipe.recipe.recipes_overview() def recipe(args: argparse.Namespace) -> tuple: @@ -95,8 +103,12 @@ def recipe(args: argparse.Namespace) -> tuple: @todo: use as default command? 
Currently this is reached by `nutra recipe anl` """ recipe_path = args.path + scale = float(args.scale) if args.scale else 0.0 + scale_mode = args.scale_mode if args.scale_mode else "kcal" - return ntclient.services.recipe.utils.recipe_overview(recipe_path=recipe_path) + return ntclient.services.recipe.recipe.recipe_overview( + recipe_path=recipe_path, scale=scale, scale_mode=scale_mode + ) ############################################################################## @@ -133,10 +145,18 @@ def calc_1rm(args: argparse.Namespace) -> tuple: row.append(int(_values[_rep])) _all.append(row) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Print results + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ print() print("Results for: epley, brzycki, and dos_remedios") print() + + # Print the n=1 average for all three calculations + _avg_1rm = round(sum(_all[0][1:]) / len(_all[0][1:]), 1) + print("1RM: %s" % _avg_1rm) + print() + _table = tabulate(_all, headers=["n", "epl", "brz", "rmds"]) print(_table) @@ -327,3 +347,45 @@ def calc_lbm_limits(args: argparse.Namespace) -> tuple: print(_table) return 0, result + + +############################################################################## +# Bug +############################################################################## +# TODO: these all require args parameter (due to parent parser defining a `--show` arg) + + +# pylint: disable=unused-argument +def bug_simulate(args: argparse.Namespace) -> tuple: + """Simulate a bug report""" + raise NotImplementedError("This service intentionally raises an error, for testing") + + +def bugs_list(args: argparse.Namespace) -> tuple: + """List bug reports that have been saved""" + return ntclient.services.bugs.list_bugs(show_all=args.show) + + +# pylint: disable=unused-argument +def bugs_report(args: argparse.Namespace) -> tuple: + """Report bugs""" + n_submissions = ntclient.services.bugs.submit_bugs() + return 0, n_submissions + + +def log_add(args: argparse.Namespace) -> tuple: + """Wrapper for log add""" + ntclient.services.logs.log_add(args.food_id, args.grams, args.date) + return 0, [] + + +def log_view(args: argparse.Namespace) -> tuple: + """Wrapper for log view""" + ntclient.services.logs.log_view(args.date) + return 0, [] + + +def log_analyze(args: argparse.Namespace) -> tuple: + """Wrapper for log analyze""" + ntclient.services.logs.log_analyze(args.date) + return 0, [] diff --git a/ntclient/core/nnest.py b/ntclient/core/nnest.py index fba911cf..f3a75f21 100644 --- a/ntclient/core/nnest.py +++ b/ntclient/core/nnest.py @@ -3,10 +3,34 @@ Created on Sat Aug 29 19:43:55 2020 @author: shane + +@todo +Think about all the use cases for the "nested" nutrient tree. Analyzing a recipe, +a food, meal. How to display the data, or filter, reverse search, sort, etc. 
""" + +# pylint: disable=too-few-public-methods +class Nutrient: + """Tracks properties of nutrients; used in the tree structure of nutrient groups""" + + def __init__(self, nut_id: int, name: str, hidden: bool = False): + self.nut_id = nut_id + self.name = name + self.hidden = hidden + self.rounded_rda = 0 # TODO: round day/recipe analysis to appropriate digit + + nnest = { - "basics": ["Protein", "Carbs", "Fats", "Fiber", "Calories"], + # "basics": ["Protein", "Carbs", "Fats", "Fiber", "Calories"], + "basics": { + # 203: {"name": "Protein", "hidden": False}, + 203: Nutrient(203, "Protein"), + 205: "Carbs", + 204: "Fats", + 291: "Fiber", + 208: "Calories (kcal)", + }, "macro_details": {"Carbs": {}, "Fat": {}}, "micro_nutrients": { "Vitamins": {"Water-Soluble": {}, "Fat-Soluble": {}}, diff --git a/ntclient/core/nnest.rst b/ntclient/core/nnest.rst new file mode 100644 index 00000000..6721d036 --- /dev/null +++ b/ntclient/core/nnest.rst @@ -0,0 +1,125 @@ +from ChatGPT: + + +Here are the grouped categories and sub-categories of related nutrients based +on the provided table: + +1. Protein: + + - Protein (203) + +2. Fat: + + - Total lipid (fat) (204) + - Fatty acids: + + - Total trans (605) + - Total saturated (606) + - Total monounsaturated (645) + - Total polyunsaturated (646) + +3. Carbohydrates: + + - Carbohydrate, by difference (205) + - Sugars: + + - Sucrose (210) + - Glucose (dextrose) (211) + - Fructose (212) + - Lactose (213) + - Maltose (214) + - Sugars, total (269) + + - Starch (209) + - Fiber, total dietary (291) + +4. Minerals: + + - Electrolytes: + + - Potassium, K (306) + - Sodium, Na (307) + - Magnesium, Mg (304) + - Calcium, Ca (301) + + - Iron, Fe (303) + - Phosphorus, P (305) + - Zinc, Zn (309) + - Copper, Cu (312) + - Fluoride, F (313) + - Manganese, Mn (315) + - Selenium, Se (317) + +5. Vitamins: + + - Vitamin A: + + - Vitamin A, IU (318) + - Retinol (319) + - Vitamin A, RAE (320) + - Carotene: + + - Carotene, beta (321) + - Carotene, alpha (322) + + - Vitamin E: + + - Vitamin E (alpha-tocopherol) (323) + - Vitamin E, added (573) + + - Vitamin D: + + - Vitamin D (324) + - Vitamin D2 (ergocalciferol) (325) + - Vitamin D3 (cholecalciferol) (326) + - Vitamin D (D2 + D3) (328) + + - Vitamin C: + + - Vitamin C, total ascorbic acid (401) + + - B Vitamins: + + - Thiamin (404) + - Riboflavin (405) + - Niacin (406) + - Pantothenic acid (410) + - Vitamin B-6 (415) + - Folate: + + - Folate, total (417) + - Folic acid (431) + - Folate, food (432) + - Folate, DFE (435) + + - Vitamin B-12: + + - Vitamin B-12 (418) + - Vitamin B-12, added (578) + + - Choline, total (421) + + - Vitamin K: + + - Vitamin K (phylloquinone) (430) + +6. Other Organic Compounds: + + - Water (255) + - Caffeine (262) + - Theobromine (263) + - Betaine (454) + - Cholesterol (601) + - Phytosterols: + + - Phytosterols (636) + - Stigmasterol (638) + - Campesterol (639) + - Beta-sitosterol (641) + + - Phytochemicals (e.g., flavonoids, isoflavones) and their subcategories + - Ash (207) + +Please note that this list may not include all possible nutrient categories and +sub-categories, and there may be other ways to categorize them as well. 
+ diff --git a/ntclient/core/nnr2.py b/ntclient/core/nnr2.py index bfe769ec..3e405d92 100644 --- a/ntclient/core/nnr2.py +++ b/ntclient/core/nnr2.py @@ -3,7 +3,7 @@ Created on Fri Jul 31 21:23:51 2020 @author: shane -""" -# NOTE: based on -# +NOTE: based on: + +""" diff --git a/ntclient/core/nutprogbar.py b/ntclient/core/nutprogbar.py index 21ced4f9..47c27c2b 100644 --- a/ntclient/core/nutprogbar.py +++ b/ntclient/core/nutprogbar.py @@ -1,38 +1,166 @@ """Temporary [wip] module for more visual (& colorful) RDA output""" +from typing import Mapping -def nutprogbar(food_amts: dict, food_analyses: list, nutrients: dict) -> dict: - """Returns progress bars, colorized, for foods analyses""" +from ntclient.utils import CLI_CONFIG - def tally() -> None: - for nut in nut_percs: - # TODO: get RDA values from nt DB, tree node nested organization - print(nut) +def nutrient_progress_bars( + _food_amts: Mapping[int, float], + _food_analyses: list, + _nutrients: Mapping[int, tuple], + # grams: float = 100, + # width: int = 50, +) -> Mapping[int, float]: + """ + Returns progress bars, colorized, for foods analyses + @TODO add option to scale up to 2000 kcal (or configured RDA value) + @TODO consider organizing the numbers into a table, with the colored bar in one slot + """ + + def print_bars() -> int: + """Print the progress bars, return n_skipped""" + n_skipped = 0 + for nut in nut_amts.items(): + nutr_id, nutr_val = nut + + # Skip if nutr_val == 0.0 + if not nutr_val: + n_skipped += 1 + continue + + # Print bars + print_nutrient_bar(nutr_id, nutr_val, _nutrients) + + return n_skipped + + # Organize data into a dict> food_analyses_dict = { - x[0]: {y[1]: y[2] for y in food_analyses if y[0] == x[0]} for x in food_analyses + int(x[0]): {int(y[1]): float(y[2]) for y in _food_analyses if y[0] == x[0]} + # NOTE: each analysis is a list of tuples, i.e. (11233, 203, 2.92) + for x in _food_analyses } - # print(food_ids) - # print(food_analyses) - + # Tally the nutrient totals nut_amts = {} - - for food_id, grams in food_amts.items(): - # r = grams / 100.0 + for food_id in _food_amts.keys(): analysis = food_analyses_dict[food_id] for nutrient_id, amt in analysis.items(): if nutrient_id not in nut_amts: - nut_amts[nutrient_id] = amt - else: - nut_amts[nutrient_id] += amt + nut_amts[int(nutrient_id)] = amt + else: # pragma: no cover + # nut_amts[int(nutrient_id)] += amt + raise ValueError("Not implemented yet, need to sum up nutrient amounts") + + print_bars() + return nut_amts + + +def print_nutrient_bar( + _n_id: int, _amount: float, _nutrients: Mapping[int, tuple] +) -> tuple: + """Print a single color-coded nutrient bar""" + nutrient = _nutrients[_n_id] + rda = nutrient[1] + tag = nutrient[3] + unit = nutrient[2] + # anti = nutrient[5] + # hidden = nutrient[...?] 
+ + # TODO: get RDA values from nt DB, tree node nested organization + if not rda: + return False, nutrient + attain = _amount / rda + perc = round(100 * attain, 1) + + if attain >= CLI_CONFIG.thresh_over: + color = CLI_CONFIG.color_over + elif attain <= CLI_CONFIG.thresh_crit: + color = CLI_CONFIG.color_crit + elif attain <= CLI_CONFIG.thresh_warn: + color = CLI_CONFIG.color_warn + else: + color = CLI_CONFIG.color_default + + # Print + detail_amount = "{0}/{1} {2}".format(round(_amount, 1), rda, unit).ljust(18) + detail_amount = "{0} -- {1}".format(detail_amount, tag) + left_index = 20 + left_pos = round(left_index * attain) if attain < 1 else left_index + print(" {0}<".format(color), end="") + print("=" * left_pos + " " * (left_index - left_pos) + ">", end="") + print(" {0}%\t[{1}]".format(perc, detail_amount), end="") + print(CLI_CONFIG.style_reset_all) + + return True, perc + + +def print_macro_bar( + _fat: float, _net_carb: float, _pro: float, _kcals_max: float, _buffer: int = 0 +) -> None: + """Print macro-nutrients bar with details.""" + _kcals = _fat * 9 + _net_carb * 4 + _pro * 4 + + if _kcals == 0: + p_fat = 0.0 + p_car = 0.0 + p_pro = 0.0 + else: + p_fat = (_fat * 9) / _kcals + p_car = (_net_carb * 4) / _kcals + p_pro = (_pro * 4) / _kcals + + # TODO: handle rounding cases, tack on to, or trim off FROM LONGEST ? + mult = _kcals / _kcals_max + n_fat = round(p_fat * _buffer * mult) + n_car = round(p_car * _buffer * mult) + n_pro = round(p_pro * _buffer * mult) + + # Headers + f_buf = " " * (n_fat // 2) + "Fat" + " " * (n_fat - n_fat // 2 - 3) + c_buf = " " * (n_car // 2) + "Carbs" + " " * (n_car - n_car // 2 - 5) + p_buf = " " * (n_pro // 2) + "Pro" + " " * (n_pro - n_pro // 2 - 3) + print( + " " + + CLI_CONFIG.color_yellow + + f_buf + + CLI_CONFIG.color_blue + + c_buf + + CLI_CONFIG.color_red + + p_buf + + CLI_CONFIG.style_reset_all + ) + + # Bars + print(" <", end="") + print(CLI_CONFIG.color_yellow + "=" * n_fat, end="") + print(CLI_CONFIG.color_blue + "=" * n_car, end="") + print(CLI_CONFIG.color_red + "=" * n_pro, end="") + print(CLI_CONFIG.style_reset_all + ">") - nut_percs = {} + # Calorie footers + k_fat = str(round(_fat * 9)) + k_car = str(round(_net_carb * 4)) + k_pro = str(round(_pro * 4)) + f_buf = " " * (n_fat // 2) + k_fat + " " * (n_fat - n_fat // 2 - len(k_fat)) + c_buf = " " * (n_car // 2) + k_car + " " * (n_car - n_car // 2 - len(k_car)) + p_buf = " " * (n_pro // 2) + k_pro + " " * (n_pro - n_pro // 2 - len(k_pro)) + print( + " " + + CLI_CONFIG.color_yellow + + f_buf + + CLI_CONFIG.color_blue + + c_buf + + CLI_CONFIG.color_red + + p_buf + + CLI_CONFIG.style_reset_all + ) - for nutrient_id, amt in nut_amts.items(): - # TODO: if not rda, show raw amounts? 
- if isinstance(nutrients[nutrient_id][1], float): - nut_percs[nutrient_id] = round(amt / nutrients[nutrient_id][1], 3) - tally() - return nut_percs +def print_header(_header: str) -> None: + """Print a colorized header""" + print(CLI_CONFIG.color_default, end="") + print("=" * (len(_header) + 2 * 5)) + print("--> %s <--" % _header) + print("=" * (len(_header) + 2 * 5)) + print(CLI_CONFIG.style_reset_all) diff --git a/ntclient/models/__init__.py b/ntclient/models/__init__.py index 7a8b378e..5f2ba54c 100644 --- a/ntclient/models/__init__.py +++ b/ntclient/models/__init__.py @@ -7,6 +7,13 @@ """ import csv +from ntclient import BUFFER_WD +from ntclient.persistence.sql.usda.funcs import ( + sql_analyze_foods, + sql_nutrients_overview, +) +from ntclient.services.analyze import day_format +from ntclient.services.calculate import calculate_nutrient_totals from ntclient.utils import CLI_CONFIG @@ -27,23 +34,29 @@ def __init__(self, file_path: str) -> None: self.food_data = {} # type: ignore + def _aggregate_rows(self) -> tuple: + """Aggregate rows into a tuple""" + print("Processing recipe file: %s" % self.file_path) + with open(self.file_path, "r", encoding="utf-8") as _file: + self.csv_reader = csv.DictReader(_file) + return tuple(list(self.csv_reader)) + def process_data(self) -> None: """ Parses out the raw CSV input read in during self.__init__() - TODO: test this with an empty CSV file - @todo: CliConfig class, to avoid these non top-level import shenanigans + TODO: test this with an empty CSV file, one with missing or corrupt values + (e.g. empty or non-numeric grams or food_id). + TODO: test with a CSV file that has duplicate recipe_id/uuid values. + TODO: how is the recipe home directory determined here? """ # Read into memory - print("Processing recipe file: %s" % self.file_path) - with open(self.file_path, "r", encoding="utf-8") as _file: - self.csv_reader = csv.DictReader(_file) - self.rows = tuple(self.csv_reader) + self.rows = self._aggregate_rows() # Validate data uuids = {x["recipe_id"] for x in self.rows} if len(uuids) != 1: - print("Found %s keys: %s" % (len(uuids), uuids)) + print("ERROR: Found %s keys: %s" % (len(uuids), uuids)) raise KeyError("FATAL: must have exactly 1 uuid per recipe CSV file!") self.uuid = list(uuids)[0] @@ -53,5 +66,37 @@ def process_data(self) -> None: if CLI_CONFIG.debug: print("Finished with recipe.") - def print_analysis(self) -> None: + def print_analysis(self, scale: float = 0, scale_mode: str = "kcal") -> None: """Run analysis on a single recipe""" + + # Get nutrient overview (RDAs, units, etc.) 
+ nutrients_rows = sql_nutrients_overview() + nutrients = {int(x[0]): tuple(x) for x in nutrients_rows.values()} + + # Analyze foods in the recipe + food_ids = set(self.food_data.keys()) + foods_analysis = {} + for food in sql_analyze_foods(food_ids): + food_id = int(food[0]) + # nut_id, val (per 100g) + anl = (int(food[1]), float(food[2])) + if food_id not in foods_analysis: + foods_analysis[food_id] = [anl] + else: + foods_analysis[food_id].append(anl) + + # Compute totals + nutrient_totals, total_weight = calculate_nutrient_totals( + self.food_data, foods_analysis + ) + + # Print results using day_format for consistency + buffer = BUFFER_WD - 4 if BUFFER_WD > 4 else BUFFER_WD + day_format( + nutrient_totals, + nutrients, + buffer=buffer, + scale=scale, + scale_mode=scale_mode, + total_weight=total_weight, + ) diff --git a/ntclient/ntsqlite b/ntclient/ntsqlite index e69368ff..3383b4e3 160000 --- a/ntclient/ntsqlite +++ b/ntclient/ntsqlite @@ -1 +1 @@ -Subproject commit e69368ff9a64db7134a212686c08922c6537bcee +Subproject commit 3383b4e330f8614ec1d4a5e534c386cb730227dd diff --git a/ntclient/persistence/__init__.py b/ntclient/persistence/__init__.py index 717bd0e3..76db9a87 100644 --- a/ntclient/persistence/__init__.py +++ b/ntclient/persistence/__init__.py @@ -8,3 +8,17 @@ @author: shane """ +import configparser +import os + +from ntclient import NUTRA_HOME + +PREFS_FILE = os.path.join(NUTRA_HOME, "prefs.ini") + +os.makedirs(NUTRA_HOME, 0o755, exist_ok=True) + +if not os.path.isfile(PREFS_FILE): # pragma: no cover + print("INFO: Generating prefs.ini file") + config = configparser.ConfigParser() + with open(PREFS_FILE, "w", encoding="utf-8") as _prefs_file: + config.write(_prefs_file) diff --git a/ntclient/persistence/csv_manager.py b/ntclient/persistence/csv_manager.py new file mode 100644 index 00000000..43f1846b --- /dev/null +++ b/ntclient/persistence/csv_manager.py @@ -0,0 +1,41 @@ +""" +CSV Persistence Manager +Handles reading and writing to daily log CSV files. 
+""" + +import csv +import os +from typing import Dict, List, Union + + +def ensure_log_exists(log_path: str) -> None: + """Creates the log file with headers if it doesn't exist.""" + if not os.path.exists(log_path): + os.makedirs(os.path.dirname(log_path), exist_ok=True) + with open(log_path, "w", newline="", encoding="utf-8") as f: + writer = csv.writer(f) + writer.writerow(["id", "grams"]) + + +def append_to_log(log_path: str, food_id: int, grams: float) -> None: + """Appends a food entry to the specified log file.""" + ensure_log_exists(log_path) + with open(log_path, "a", newline="", encoding="utf-8") as f: + writer = csv.writer(f) + writer.writerow([food_id, grams]) + + +def read_log(log_path: str) -> List[Dict[str, Union[str, float]]]: + """Reads a log file and returns a list of dictionaries.""" + if not os.path.exists(log_path): + return [] + + with open(log_path, "r", encoding="utf-8") as f: + # Filter out comments/empty lines if necessary, matching existing logic + rows = [row for row in f if not row.startswith("#") and row.strip()] + if not rows: + return [] + + reader = csv.DictReader(rows) + # Check if empty (headers only or truly empty) - DictReader handles headers + return list(reader) diff --git a/ntclient/persistence/sql/__init__.py b/ntclient/persistence/sql/__init__.py index 944b0f87..285a2eb3 100644 --- a/ntclient/persistence/sql/__init__.py +++ b/ntclient/persistence/sql/__init__.py @@ -1,6 +1,7 @@ """Main SQL persistence module, shared between USDA and NT databases""" + import sqlite3 -from collections.abc import Sequence +from collections.abc import Sequence # pylint: disable=import-error from ntclient.utils import CLI_CONFIG @@ -9,20 +10,23 @@ # ------------------------------------------------ -def sql_entries(sql_result: sqlite3.Cursor) -> list: - """Formats and returns a `sql_result()` for console digestion and output""" - # TODO: return object: metadata, command, status, errors, etc? - - rows = sql_result.fetchall() - return rows +def sql_entries(sql_result: sqlite3.Cursor) -> tuple: + """ + Formats and returns a `sql_result()` for console digestion and output + FIXME: the IDs are not necessarily integers, but are unique. + TODO: return object: metadata, command, status, errors, etc? + """ -def sql_entries_headers(sql_result: sqlite3.Cursor) -> tuple: - """Formats and returns a `sql_result()` for console digestion and output""" rows = sql_result.fetchall() - headers = [x[0] for x in sql_result.description] + headers = [x[0] for x in (sql_result.description if sql_result.description else [])] - return headers, rows + return ( + rows, + headers, + sql_result.rowcount, + sql_result.lastrowid, + ) # ------------------------------------------------ @@ -76,10 +80,13 @@ def _prep_query( # TODO: separate `entry` & `entries` entity for single vs. bulk insert? 
if values: - if isinstance(values, list): - cur.executemany(query, values) - else: # tuple + if isinstance(values, tuple): cur.execute(query, values) + # elif isinstance(values, list): + # cur.executemany(query, values) + else: + raise TypeError("'values' must be a list or tuple!") + else: cur.execute(query) @@ -91,7 +98,7 @@ def _sql( query: str, db_name: str, values: Sequence = (), -) -> list: +) -> tuple: """@param values: tuple | list""" cur = _prep_query(con, query, db_name, values) @@ -102,19 +109,3 @@ def _sql( close_con_and_cur(con, cur) return result - - -def _sql_headers( - con: sqlite3.Connection, - query: str, - db_name: str, - values: Sequence = (), -) -> tuple: - """@param values: tuple | list""" - - cur = _prep_query(con, query, db_name, values) - - result = sql_entries_headers(cur) - - close_con_and_cur(con, cur) - return result diff --git a/ntclient/persistence/sql/nt/__init__.py b/ntclient/persistence/sql/nt/__init__.py index 58c9a05e..10c1a302 100644 --- a/ntclient/persistence/sql/nt/__init__.py +++ b/ntclient/persistence/sql/nt/__init__.py @@ -1,7 +1,8 @@ """Nutratracker DB specific sqlite module""" + import os import sqlite3 -from collections.abc import Sequence +from collections.abc import Sequence # pylint: disable=import-error from ntclient import ( NT_DB_NAME, @@ -10,7 +11,7 @@ NUTRA_HOME, __db_target_nt__, ) -from ntclient.persistence.sql import _sql, _sql_headers, version +from ntclient.persistence.sql import _sql, version from ntclient.utils.exceptions import SqlConnectError, SqlInvalidVersionError @@ -39,7 +40,7 @@ def nt_init() -> None: ) print("..DONE!") os.remove(NTSQLITE_BUILDPATH) # clean up - else: + else: # pragma: no cover # TODO: is this logic (and these error messages) the best? # what if .isdir() == True ? Fails with stacktrace? 
os.rename(NTSQLITE_BUILDPATH, NTSQLITE_DESTINATION) @@ -79,15 +80,8 @@ def nt_sqlite_connect(version_check: bool = True) -> sqlite3.Connection: raise SqlConnectError("ERROR: nt database doesn't exist, please run `nutra init`") -def sql(query: str, values: Sequence = ()) -> list: +def sql(query: str, values: Sequence = ()) -> tuple: """Executes a SQL command to nt.sqlite3""" con = nt_sqlite_connect() return _sql(con, query, db_name="nt", values=values) - - -def sql_headers(query: str, values: Sequence = ()) -> tuple: - """Executes a SQL command to nt.sqlite3""" - - con = nt_sqlite_connect() - return _sql_headers(con, query, db_name="nt", values=values) diff --git a/ntclient/persistence/sql/nt/funcs.py b/ntclient/persistence/sql/nt/funcs.py index af8a143c..0b3773a0 100644 --- a/ntclient/persistence/sql/nt/funcs.py +++ b/ntclient/persistence/sql/nt/funcs.py @@ -1,9 +1,12 @@ """nt.sqlite3 functions module""" + from ntclient.persistence.sql.nt import sql def sql_nt_next_index(table: str) -> int: """Used for previewing inserts""" + # TODO: parameterized queries # noinspection SqlResolve query = "SELECT MAX(id) as max_id FROM %s;" % table # nosec: B608 - return int(sql(query)[0]["max_id"]) + rows, _, _, _ = sql(query) + return int(rows[0]["max_id"] or 0) diff --git a/ntclient/persistence/sql/usda/__init__.py b/ntclient/persistence/sql/usda/__init__.py index f63fd2be..c16d8d3d 100644 --- a/ntclient/persistence/sql/usda/__init__.py +++ b/ntclient/persistence/sql/usda/__init__.py @@ -1,12 +1,13 @@ """USDA DB specific sqlite module""" + import os import sqlite3 import tarfile import urllib.request -from collections.abc import Sequence +from collections.abc import Sequence # pylint: disable=import-error from ntclient import NUTRA_HOME, USDA_DB_NAME, __db_target_usda__ -from ntclient.persistence.sql import _sql, _sql_headers, version +from ntclient.persistence.sql import _sql, version from ntclient.utils.exceptions import SqlConnectError, SqlInvalidVersionError @@ -20,7 +21,7 @@ def download_extract_usda() -> None: """Download USDA tarball from BitBucket and extract to storage folder""" # TODO: move this into separate module, ignore coverage. Avoid SLOW tests - if yes or input_agree().lower() == "y": + if yes or input_agree().lower() == "y": # pragma: no cover # TODO: save with version in filename? # Don't re-download tarball, just extract? save_path = os.path.join(NUTRA_HOME, "%s.tar.xz" % USDA_DB_NAME) @@ -40,15 +41,14 @@ def download_extract_usda() -> None: # or version mismatch due to developer mistake / overwrite? # And seed mirrors; don't hard code one host here! 
url = ( - "https://bitbucket.org/dasheenster/nutra-utils/downloads/{0}-{1}.tar.xz".format( - USDA_DB_NAME, __db_target_usda__ - ) + "https://github.com/nutratech/usda-sqlite/releases" + "/download/{1}/{0}-{1}.tar.xz".format(USDA_DB_NAME, __db_target_usda__) ) - if USDA_DB_NAME not in os.listdir(NUTRA_HOME): + if USDA_DB_NAME not in os.listdir(NUTRA_HOME): # pragma: no cover print("INFO: usda.sqlite3 doesn't exist, is this a fresh install?") download_extract_usda() - elif usda_ver() != __db_target_usda__: + elif usda_ver() != __db_target_usda__: # pragma: no cover print( "INFO: usda.sqlite3 target [{0}] doesn't match actual [{1}], ".format( __db_target_usda__, usda_ver() @@ -98,37 +98,26 @@ def usda_ver() -> str: return version(con) -def sql(query: str, values: Sequence = (), version_check: bool = True) -> list: +def sql( + query: str, + values: Sequence = (), + version_check: bool = True, + params: Sequence = (), +) -> tuple: """ Executes a SQL command to usda.sqlite3 @param query: Input SQL query - @param values: Union[tuple, list] Leave as empty tuple for no values, - e.g. bare query. Populate a tuple for a single insert. And use a list for - cur.executemany() - @param version_check: Ignore mismatch version, useful for "meta" commands + @param values: Union[tuple, list] (Deprecated: use params) + @param version_check: Ignore mismatch version + @param params: bind parameters @return: List of selected SQL items """ con = usda_sqlite_connect(version_check=version_check) - # TODO: support argument: _sql(..., params=params, ...) - return _sql(con, query, db_name="usda", values=values) - - -def sql_headers(query: str, values: Sequence = (), version_check: bool = True) -> tuple: - """ - Executes a SQL command to usda.sqlite3 [WITH HEADERS] - - @param query: Input SQL query - @param values: Union[tuple, list] Leave as empty tuple for no values, - e.g. bare query. Populate a tuple for a single insert. And use a list for - cur.executemany() - @param version_check: Ignore mismatch version, useful for "meta" commands - @return: List of selected SQL items - """ - - con = usda_sqlite_connect(version_check=version_check) + # Support params alias + _values = params if params else values # TODO: support argument: _sql(..., params=params, ...) - return _sql_headers(con, query, db_name="usda", values=values) + return _sql(con, query, db_name="usda", values=_values) diff --git a/ntclient/persistence/sql/usda/funcs.py b/ntclient/persistence/sql/usda/funcs.py index 34422325..6ef2570e 100644 --- a/ntclient/persistence/sql/usda/funcs.py +++ b/ntclient/persistence/sql/usda/funcs.py @@ -1,7 +1,7 @@ """usda.sqlite functions module""" from ntclient import NUTR_ID_KCAL -from ntclient.persistence.sql.usda import sql, sql_headers +from ntclient.persistence.sql.usda import sql ################################################################################ @@ -11,8 +11,8 @@ def sql_fdgrp() -> dict: """Shows food groups""" query = "SELECT * FROM fdgrp;" - result = sql(query) - return {x[0]: x for x in result} + rows, _, _, _ = sql(query) + return {x[0]: x for x in rows} def sql_food_details(_food_ids: set = None) -> list: # type: ignore @@ -20,28 +20,31 @@ def sql_food_details(_food_ids: set = None) -> list: # type: ignore if not _food_ids: query = "SELECT * FROM food_des;" + params = () else: - # TODO: does sqlite3 driver support this? cursor.executemany() ? 
- query = "SELECT * FROM food_des WHERE id IN (%s);" - food_ids = ",".join(str(x) for x in set(_food_ids)) - query = query % food_ids + # Generate placeholders for IN clause + placeholders = ",".join("?" for _ in _food_ids) + query = f"SELECT * FROM food_des WHERE id IN ({placeholders});" # nosec: B608 + params = tuple(_food_ids) - return sql(query) + rows, _, _, _ = sql(query, params=params) + return list(rows) def sql_nutrients_overview() -> dict: """Shows nutrients overview""" query = "SELECT * FROM nutrients_overview;" - result = sql(query) - return {x[0]: x for x in result} + rows, _, _, _ = sql(query) + return {x[0]: x for x in rows} def sql_nutrients_details() -> tuple: """Shows nutrients 'details'""" query = "SELECT * FROM nutrients_overview;" - return sql_headers(query) + rows, headers, _, _ = sql(query) + return rows, headers def sql_servings(_food_ids: set) -> list: @@ -59,9 +62,11 @@ def sql_servings(_food_ids: set) -> list: WHERE serv.food_id IN (%s); """ - # FIXME: support this kind of thing by library code & parameterized queries - food_ids = ",".join(str(x) for x in set(_food_ids)) - return sql(query % food_ids) + # Dynamically generate placeholders + placeholders = ",".join("?" for _ in _food_ids) + query = query % placeholders + rows, _, _, _ = sql(query, params=tuple(_food_ids)) + return list(rows) def sql_analyze_foods(food_ids: set) -> list: @@ -77,9 +82,11 @@ def sql_analyze_foods(food_ids: set) -> list: WHERE food_des.id IN (%s); """ - # TODO: parameterized queries - food_ids_concat = ",".join(str(x) for x in set(food_ids)) - return sql(query % food_ids_concat) + # parameterized queries + placeholders = ",".join("?" for _ in food_ids) + query = query % placeholders + rows, _, _, _ = sql(query, params=tuple(food_ids)) + return list(rows) ################################################################################ @@ -96,15 +103,17 @@ def sql_sort_helper1(nutrient_id: int) -> list: FROM nut_data WHERE - nutr_id = %s - OR nutr_id = %s + nutr_id = ? + OR nutr_id = ? ORDER BY food_id; """ - - return sql(query % (NUTR_ID_KCAL, nutrient_id)) + # Parameterized query + rows, _, _, _ = sql(query, params=(NUTR_ID_KCAL, nutrient_id)) + return list(rows) +# TODO: these functions are unused, replace `sql_sort_helper1` (above) with these two def sql_sort_foods(nutr_id: int) -> list: """Sort foods by nutr_id per 100 g""" @@ -123,12 +132,13 @@ def sql_sort_foods(nutr_id: int) -> list: LEFT JOIN nut_data kcal ON food.id = kcal.food_id AND kcal.nutr_id = 208 WHERE - nut_data.nutr_id = %s + nut_data.nutr_id = ? ORDER BY nut_data.nutr_val DESC; """ - - return sql(query % nutr_id) + # Parameterized query + rows, _, _, _ = sql(query, params=(nutr_id,)) + return list(rows) def sql_sort_foods_by_kcal(nutr_id: int) -> list: @@ -152,9 +162,10 @@ def sql_sort_foods_by_kcal(nutr_id: int) -> list: AND kcal.nutr_id = 208 AND kcal.nutr_val > 0 WHERE - nut_data.nutr_id = %s + nut_data.nutr_id = ? 
ORDER BY (nut_data.nutr_val / kcal.nutr_val) DESC; """ - - return sql(query % nutr_id) + # Parameterized query + rows, _, _, _ = sql(query, params=(nutr_id,)) + return list(rows) diff --git a/ntclient/resources/math/1rm-regressions.wls b/ntclient/resources/math/1rm-regressions.wls index 962ea89f..861a8c37 100755 --- a/ntclient/resources/math/1rm-regressions.wls +++ b/ntclient/resources/math/1rm-regressions.wls @@ -7,7 +7,7 @@ (**) (*(* Brzycki *)*) (*(*brzLin[n_]:=(37-n)/36*)*) -(*brz[n_]:=(37-n+0.005n^2)/36*) +(*brz[n_]:=(36.995-n+0.005n^2)/36*) (**) (*(* Dos Remedios *)*) (*dosPts={{1,1},{2,0.92},{3,0.9},{5,0.87},{6,0.82},{8,0.75},{10,0.7},{12,0.65},{15,0.6},{20,0.55}};*) @@ -38,6 +38,3 @@ (*dos[18]*) (*dos[19]*) (*dos[20]*) - - - diff --git a/ntclient/services/__init__.py b/ntclient/services/__init__.py index b540aaf6..0b641d43 100644 --- a/ntclient/services/__init__.py +++ b/ntclient/services/__init__.py @@ -1,4 +1,5 @@ """Services module, currently only home to SQL/persistence init method""" + import os from ntclient import NUTRA_HOME @@ -27,6 +28,7 @@ def init(yes: bool = False) -> tuple: if not os.path.isdir(NUTRA_HOME): os.makedirs(NUTRA_HOME, 0o755) print("..DONE!") + # TODO: should creating preferences/settings file be handled in persistence module? # TODO: print off checks, return False if failed print("USDA db ", end="") @@ -37,12 +39,12 @@ def init(yes: bool = False) -> tuple: build_ntsqlite() nt_init() - print("\nAll checks have passed!") + print("\nSuccess! All checks have passed!") print( """ -Nutrient tracker is free software. It comes with NO warranty or guarantee. +Nutrient Tracker is free software. It comes with NO warranty or guarantee. You may use it as you please. -You may make changes, as long as you disclose and publish them. +You may make changes as long as you disclose and publish them. """ ) return 0, True diff --git a/ntclient/services/analyze.py b/ntclient/services/analyze.py index 6824f3e1..cab8f820 100644 --- a/ntclient/services/analyze.py +++ b/ntclient/services/analyze.py @@ -7,6 +7,7 @@ import csv from collections import OrderedDict +from typing import Mapping, Sequence from tabulate import tabulate @@ -18,36 +19,41 @@ NUTR_ID_KCAL, NUTR_ID_PROTEIN, ) +from ntclient.core.nutprogbar import print_header, print_macro_bar, print_nutrient_bar from ntclient.persistence.sql.usda.funcs import ( sql_analyze_foods, sql_food_details, sql_nutrients_overview, sql_servings, ) +from ntclient.services.calculate import ( + calculate_nutrient_totals, + calculate_scaling_multiplier, +) from ntclient.utils import CLI_CONFIG -################################################################################ +############################################################################## # Foods -################################################################################ -def foods_analyze(food_ids: set, grams: float = 0) -> tuple: +############################################################################## +def foods_analyze( + food_ids: set, grams: float = 100, scale: float = 0, scale_mode: str = "kcal" +) -> tuple: """ Analyze a list of food_ids against stock RDA values - TODO: from ntclient.utils.nutprogbar import nutprogbar - TODO: support -t (tabular/non-visual) output flag + (NOTE: only supports a single food for now... 
add compare foods support later) """ + # pylint: disable=too-many-locals - ################################################################################ + ########################################################################## # Get analysis - ################################################################################ + ########################################################################## raw_analyses = sql_analyze_foods(food_ids) analyses = {} for analysis in raw_analyses: - food_id = analysis[0] - if grams: - anl = (analysis[1], round(analysis[2] * grams / 100, 2)) - else: - anl = (analysis[1], analysis[2]) + food_id = int(analysis[0]) + anl = (int(analysis[1]), float(round(analysis[2] * grams / 100, 2))) + # Add values to list if food_id not in analyses: analyses[food_id] = [anl] else: @@ -59,23 +65,26 @@ def foods_analyze(food_ids: set, grams: float = 0) -> tuple: nutrients = sql_nutrients_overview() rdas = {x[0]: x[1] for x in nutrients.values()} - ################################################################################ + ########################################################################## # Food-by-food analysis (w/ servings) - ################################################################################ + ########################################################################## servings_rows = [] nutrients_rows = [] for food_id, nut_val_tuples in analyses.items(): + # Print food name food_name = food_des[food_id][2] + if len(food_name) > 45: + food_name = food_name[:45] + "..." print( - "\n======================================\n" + "\n============================================================\n" + "==> {0} ({1})\n".format(food_name, food_id) - + "======================================\n" + + "============================================================\n" ) - print("\n=========================\nSERVINGS\n=========================\n") - ################################################################################ + ###################################################################### # Serving table - ################################################################################ + ###################################################################### + print_header("SERVINGS") headers = ["msre_id", "msre_desc", "grams"] serving_rows = [(x[1], x[2], x[3]) for x in serving if x[0] == food_id] # Print table @@ -83,57 +92,67 @@ def foods_analyze(food_ids: set, grams: float = 0) -> tuple: print(servings_table) servings_rows.append(serving_rows) + # Show refuse (aka waste) if available refuse = next( ((x[7], x[8]) for x in food_des.values() if x[0] == food_id and x[7]), None ) if refuse: - print("\n=========================\nREFUSE\n=========================\n") + print_header("REFUSE") print(refuse[0]) print(" ({0}%, by mass)".format(refuse[1])) - print("\n=========================\nNUTRITION\n=========================\n") + # Prepare analysis dict for day_format + analysis_dict = {x[0]: x[1] for x in nut_val_tuples} - ################################################################################ - # Nutrient table - ################################################################################ - headers = ["id", "nutrient", "rda", "amount", "units"] + # Reconstruct nutrient_rows to satisfy legacy return contract (and tests) nutrient_rows = [] for nutrient_id, amount in nut_val_tuples: - # Skip zero values if not amount: continue - nutr_desc = nutrients[nutrient_id][4] or nutrients[nutrient_id][3] unit = nutrients[nutrient_id][2] - - # Insert 
RDA % into row
             if rdas[nutrient_id]:
-                rda_perc = str(round(amount / rdas[nutrient_id] * 100, 1)) + "%"
+                rda_perc = float(round(amount / rdas[nutrient_id] * 100, 1))
             else:
                 rda_perc = None
             row = [nutrient_id, nutr_desc, rda_perc, round(amount, 2), unit]
-
             nutrient_rows.append(row)
-
-        ################################################################################
-        # Print table
-        ################################################################################
-        table = tabulate(nutrient_rows, headers=headers, tablefmt="presto")
-        print(table)
         nutrients_rows.append(nutrient_rows)
+        # Print view using consistent format
+        buffer = BUFFER_WD - 4 if BUFFER_WD > 4 else BUFFER_WD
+        day_format(
+            analysis_dict,
+            nutrients,
+            buffer=buffer,
+            scale=scale,
+            scale_mode=scale_mode,
+            total_weight=grams,
+        )
+
     return 0, nutrients_rows, servings_rows
-################################################################################
+##############################################################################
 # Day
-################################################################################
+##############################################################################
-def day_analyze(day_csv_paths: list, rda_csv_path: str = str()) -> tuple:
-    """Analyze a day optionally with custom RDAs,
-    e.g. nutra day ~/.nutra/rocky.csv -r ~/.nutra/dog-rdas-18lbs.csv
-    TODO: Should be a subset of foods_analyze
+def day_analyze(
+    day_csv_paths: Sequence[str],
+    rda_csv_path: str = str(),
+    scale: float = 0,
+    scale_mode: str = "kcal",
+) -> tuple:
+    """Analyze a day optionally with custom RDAs, examples:
+
+    ./nutra day tests/resources/day/human-test.csv
+
+    nutra day ~/.nutra/rocky.csv -r ~/.nutra/dog-rdas-18lbs.csv
+
+    TODO: Should be a subset of foods_analyze (encapsulate/abstract/reuse code)
     """
+    # pylint: disable=too-many-locals,too-many-branches
+    # Get user RDAs from CSV file, if supplied
     if rda_csv_path:
         with open(rda_csv_path, encoding="utf-8") as file_path:
             rda_csv_input = csv.DictReader(
@@ -143,6 +162,7 @@ def day_analyze(day_csv_paths: list, rda_csv_path: str = str()) -> tuple:
     else:
         rdas = []
 
+    # Get daily logs from CSV file
     logs = []
     food_ids = set()
     for day_csv_path in day_csv_paths:
@@ -155,7 +175,7 @@ def day_analyze(day_csv_paths: list, rda_csv_path: str = str()) -> tuple:
                 food_ids.add(int(entry["id"]))
         logs.append(log)
-    # Inject user RDAs
+    # Inject user RDAs, if supplied (otherwise fall back to defaults)
     nutrients_lists = [list(x) for x in sql_nutrients_overview().values()]
     for rda in rdas:
         nutrient_id = int(rda["id"])
@@ -166,7 +186,7 @@ def day_analyze(day_csv_paths: list, rda_csv_path: str = str()) -> tuple:
         if CLI_CONFIG.debug:
             substr = "{0} {1}".format(_rda, _nutrient[2]).ljust(12)
             print("INJECT RDA: {0} --> {1}".format(substr, _nutrient[4]))
-    nutrients = {x[0]: x for x in nutrients_lists}
+    nutrients = {int(x[0]): tuple(x) for x in nutrients_lists}
 
     # Analyze foods
     foods_analysis = {}
@@ -180,134 +200,64 @@ def day_analyze(day_csv_paths: list, rda_csv_path: str = str()) -> tuple:
 
     # Compute totals
     nutrients_totals = []
+    total_grams_list = []
+
     for log in logs:
-        nutrient_totals = OrderedDict()  # dict()/{} is NOT ORDERED before 3.6/3.7
+        # Aggregate duplicates in log if any
+        food_data: OrderedDict[int, float] = OrderedDict()
         for entry in log:
             if entry["id"]:
-                food_id = int(entry["id"])
-                grams = float(entry["grams"])
-                for _nutrient2 in foods_analysis[food_id]:
-                    nutr_id = _nutrient2[0]
-                    nutr_per_100g = _nutrient2[1]
-                    nutr_val = grams / 100 * nutr_per_100g
- if nutr_id not in nutrient_totals: - nutrient_totals[nutr_id] = nutr_val - else: - nutrient_totals[nutr_id] += nutr_val + f_id = int(entry["id"]) + f_grams = float(entry["grams"]) + if f_id in food_data: + food_data[f_id] += f_grams + else: + food_data[f_id] = f_grams + + nutrient_totals, daily_grams = calculate_nutrient_totals( + food_data, foods_analysis + ) nutrients_totals.append(nutrient_totals) + total_grams_list.append(daily_grams) - ####### - # Print + # Print results buffer = BUFFER_WD - 4 if BUFFER_WD > 4 else BUFFER_WD - for analysis in nutrients_totals: - day_format(analysis, nutrients, buffer=buffer) + for i, analysis in enumerate(nutrients_totals): + day_format( + analysis, + nutrients, + buffer=buffer, + scale=scale, + scale_mode=scale_mode, + total_weight=total_grams_list[i], + ) return 0, nutrients_totals -# TODO: why not this...? nutrients: Mapping[int, tuple] -def day_format(analysis: dict, nutrients: dict, buffer: int = 0) -> None: +def day_format( + analysis: Mapping[int, float], + nutrients: Mapping[int, tuple], + buffer: int = 0, + scale: float = 0, + scale_mode: str = "kcal", + total_weight: float = 0, +) -> None: """Formats day analysis for printing to console""" + # pylint: disable=too-many-arguments,too-many-locals - def print_header(header: str) -> None: - print(CLI_CONFIG.color_default, end="") - print("~~~~~~~~~~~~~~~~~~~~~~~~~~~") - print("--> %s" % header) - print("~~~~~~~~~~~~~~~~~~~~~~~~~~~") - print(CLI_CONFIG.style_reset_all) - - def print_macro_bar( - _fat: float, _net_carb: float, _pro: float, _kcals_max: float, _buffer: int = 0 - ) -> None: - _kcals = fat * 9 + net_carb * 4 + _pro * 4 - - p_fat = (_fat * 9) / _kcals - p_car = (_net_carb * 4) / _kcals - p_pro = (_pro * 4) / _kcals - - # TODO: handle rounding cases, tack on to, or trim off FROM LONGEST ? 
- mult = _kcals / _kcals_max - n_fat = round(p_fat * _buffer * mult) - n_car = round(p_car * _buffer * mult) - n_pro = round(p_pro * _buffer * mult) - - # Headers - f_buf = " " * (n_fat // 2) + "Fat" + " " * (n_fat - n_fat // 2 - 3) - c_buf = " " * (n_car // 2) + "Carbs" + " " * (n_car - n_car // 2 - 5) - p_buf = " " * (n_pro // 2) + "Pro" + " " * (n_pro - n_pro // 2 - 3) - print( - " " - + CLI_CONFIG.color_yellow - + f_buf - + CLI_CONFIG.color_blue - + c_buf - + CLI_CONFIG.color_red - + p_buf - + CLI_CONFIG.style_reset_all - ) - - # Bars - print(" <", end="") - print(CLI_CONFIG.color_yellow + "=" * n_fat, end="") - print(CLI_CONFIG.color_blue + "=" * n_car, end="") - print(CLI_CONFIG.color_red + "=" * n_pro, end="") - print(CLI_CONFIG.style_reset_all + ">") - - # Calorie footers - k_fat = str(round(fat * 9)) - k_car = str(round(net_carb * 4)) - k_pro = str(round(pro * 4)) - f_buf = " " * (n_fat // 2) + k_fat + " " * (n_fat - n_fat // 2 - len(k_fat)) - c_buf = " " * (n_car // 2) + k_car + " " * (n_car - n_car // 2 - len(k_car)) - p_buf = " " * (n_pro // 2) + k_pro + " " * (n_pro - n_pro // 2 - len(k_pro)) - print( - " " - + CLI_CONFIG.color_yellow - + f_buf - + CLI_CONFIG.color_blue - + c_buf - + CLI_CONFIG.color_red - + p_buf - + CLI_CONFIG.style_reset_all - ) - - def print_nute_bar(_n_id: int, amount: float, _nutrients: dict) -> tuple: - nutrient = _nutrients[_n_id] - rda = nutrient[1] - tag = nutrient[3] - unit = nutrient[2] - # anti = nutrient[5] - - if not rda: - return False, nutrient - attain = amount / rda - perc = round(100 * attain, 1) - - if attain >= CLI_CONFIG.thresh_over: - color = CLI_CONFIG.color_over - elif attain <= CLI_CONFIG.thresh_crit: - color = CLI_CONFIG.color_crit - elif attain <= CLI_CONFIG.thresh_warn: - color = CLI_CONFIG.color_warn - else: - color = CLI_CONFIG.color_default - - # Print - detail_amount = "{0}/{1} {2}".format(round(amount, 1), rda, unit).ljust(18) - detail_amount = "{0} -- {1}".format(detail_amount, tag) - left_index = 20 - left_pos = round(left_index * attain) if attain < 1 else left_index - print(" {0}<".format(color), end="") - print("=" * left_pos + " " * (left_index - left_pos) + ">", end="") - print(" {0}%\t[{1}]".format(perc, detail_amount), end="") - print(CLI_CONFIG.style_reset_all) + multiplier = calculate_scaling_multiplier( + scale, scale_mode, analysis, nutrients, total_weight + ) - return True, perc + # Apply multiplier + if multiplier != 1.0: + analysis = {k: v * multiplier for k, v in analysis.items()} # Actual values - kcals = round(analysis[NUTR_ID_KCAL]) - pro = analysis[NUTR_ID_PROTEIN] - net_carb = analysis[NUTR_ID_CARBS] - analysis[NUTR_ID_FIBER] - fat = analysis[NUTR_ID_FAT_TOT] + kcals = round(analysis.get(NUTR_ID_KCAL, 0)) + pro = analysis.get(NUTR_ID_PROTEIN, 0) + net_carb = analysis.get(NUTR_ID_CARBS, 0) - analysis.get(NUTR_ID_FIBER, 0) + fat = analysis.get(NUTR_ID_FAT_TOT, 0) kcals_449 = round(4 * pro + 4 * net_carb + 9 * fat) # Desired values @@ -317,14 +268,17 @@ def print_nute_bar(_n_id: int, amount: float, _nutrients: dict) -> tuple: fat_rda = nutrients[NUTR_ID_FAT_TOT][1] # Print calories and macronutrient bars - print_header("Macronutrients") + print_header("Macro-nutrients") kcals_max = max(kcals, kcals_rda) - rda_perc = round(kcals * 100 / kcals_rda, 1) + rda_perc = round(kcals * 100 / kcals_rda, 1) if kcals_rda else 0 print( "Actual: {0} kcal ({1}% RDA), {2} by 4-4-9".format( kcals, rda_perc, kcals_449 ) ) + if scale: + print(" (Scaled to %s %s)" % (scale, scale_mode)) + print_macro_bar(fat, net_carb, pro, 
kcals_max, _buffer=buffer) print( "\nDesired: {0} kcal ({1} kcal)".format( @@ -340,11 +294,11 @@ def print_nute_bar(_n_id: int, amount: float, _nutrients: dict) -> tuple: ) # Nutrition detail report - print_header("Nutrition detail report") - for n_id in analysis: - print_nute_bar(n_id, analysis[n_id], nutrients) - # TODO: below + print_header("Nutrition detail report%s" % (" (SCALED)" if scale else "")) + for nutr_id, nutr_val in analysis.items(): + print_nutrient_bar(nutr_id, nutr_val, nutrients) + # TODO: actually filter and show the number of filtered fields print( - "work in progress... " - "some minor fields with negligible data, they are not shown here" + "work in progress...", + "some minor fields with negligible data, they are not shown here", ) diff --git a/ntclient/services/api/__init__.py b/ntclient/services/api/__init__.py new file mode 100644 index 00000000..37b399ae --- /dev/null +++ b/ntclient/services/api/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Feb 13 14:28:20 2024 + +@author: shane +""" +import requests + +REQUEST_READ_TIMEOUT = 18 +REQUEST_CONNECT_TIMEOUT = 5 + +# TODO: try all of these; cache (save in prefs.json) the one which works first +URLS_API = ( + "https://api.nutra.tk", + "https://api.dev.nutra.tk", + "http://216.218.228.93", # dev + "http://216.218.216.163", # prod +) + + +def cache_mirrors() -> str: + """Cache mirrors""" + for mirror in URLS_API: + try: + _res = requests.get( + mirror, + timeout=(REQUEST_CONNECT_TIMEOUT, REQUEST_READ_TIMEOUT), + verify=mirror.startswith("https://"), + ) + + _res.raise_for_status() + # TODO: save in persistence config.ini + print("INFO: mirror SUCCESS '%s'" % mirror) + return mirror + except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError): + print("WARN: mirror FAILURE '%s'" % mirror) + + return str() + + +class ApiClient: + """Client for connecting to the remote server/API.""" + + def __init__(self) -> None: + self.host = cache_mirrors() + if not self.host: # pragma: no cover + raise ConnectionError("Cannot find suitable API host!") + + def post(self, path: str, data: dict) -> requests.Response: + """Post data to the API.""" + _res = requests.post( + self.host + "/" + path, + json=data, + timeout=(REQUEST_CONNECT_TIMEOUT, REQUEST_READ_TIMEOUT), + ) + _res.raise_for_status() + return _res + + # TODO: move this outside class; support with host iteration helper method + def post_bug(self, bug: dict) -> requests.Response: + """Post a bug report to the developer.""" + return self.post("bug", bug) diff --git a/ntclient/services/bugs.py b/ntclient/services/bugs.py new file mode 100644 index 00000000..df2462c3 --- /dev/null +++ b/ntclient/services/bugs.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Feb 13 09:51:48 2024 + +@author: shane +""" +import os +import platform +import sqlite3 +import traceback +from typing import Sequence + +import ntclient.services.api +from ntclient import __db_target_nt__, __db_target_usda__, __version__ +from ntclient.persistence.sql.nt import sql as sql_nt +from ntclient.utils import CLI_CONFIG + +# TODO: handle mocks in tests so coverage doesn't vary when bugs exist (vs. don't) + + +def insert(args: list, exception: Exception) -> None: + """Insert bug report into nt.sqlite3, return True/False.""" + print("INFO: inserting bug report...") + try: + sql_nt( + """ +INSERT INTO bug + (profile_id, arguments, exc_type, exc_msg, stack, client_info, app_info, user_details) + VALUES + (?,?,?,?,?,?,?,?) 
+ """, + ( + 1, + " ".join(args) if args else None, + exception.__class__.__name__, + str(exception), + os.linesep.join(traceback.format_tb(exception.__traceback__)), + # client_info + str( + { + "platform": platform.system(), + "python_version": platform.python_version(), + "client_interface": "cli", + } + ), + # app_info + str( + { + "version": __version__, + "version_nt_db_target": __db_target_nt__, + "version_usda_db_target": __db_target_usda__, + } + ), + # user_details (TODO: add user details) + None, + ), + ) + except sqlite3.IntegrityError as exc: + print("WARN: %s" % repr(exc)) + dupe_bug_insertion_exc = ( + "IntegrityError('UNIQUE constraint failed: bug.arguments, bug.stack')" + ) + if repr(exc) == dupe_bug_insertion_exc: + print("INFO: bug report already exists") + else: # pragma: no cover + raise + + +def _list_bugs() -> list: + """List all bugs, with headers as dict keys.""" + rows, _, _, _ = sql_nt("SELECT * FROM bug") + bugs = [dict(x) for x in rows] + return bugs + + +def list_bugs(show_all: bool) -> tuple: + """List all bugs, with headers. Returns (exit_code, bugs: list[dict]).""" + + bugs = _list_bugs() + n_bugs_total = len(bugs) + n_bugs_unsubmitted = len([x for x in bugs if not bool(x["submitted"])]) + + print("You have: %s total bugs amassed in your journey." % n_bugs_total) + print("Of these, %s require submission/reporting." % n_bugs_unsubmitted) + print() + + for bug in bugs: + if not show_all: + continue + # Skip submitted bugs by default + if bool(bug["submitted"]) and not CLI_CONFIG.debug: + continue + # Print all bug properties (except noisy stacktrace) + print(", ".join(str(x) for x in bug.values() if "\n" not in str(x))) + print() + + if n_bugs_unsubmitted > 0: + print("NOTE: You have bugs awaiting submission. Please run the report command") + return 0, bugs + + +def _list_bugs_unsubmitted() -> Sequence[dict]: + """List unsubmitted bugs, with headers as dict keys.""" + rows, _, _, _ = sql_nt("SELECT * FROM bug WHERE submitted = 0") + bugs = [dict(x) for x in rows] + return bugs + + +def submit_bugs() -> int: + """Submit bug reports to developer, return n_submitted.""" + bugs = _list_bugs_unsubmitted() + + if len(bugs) == 0: + print("INFO: no unsubmitted bugs found") + return 0 + + api_client = ntclient.services.api.ApiClient() + + n_submitted = 0 + print("submitting %s bug reports..." % len(bugs)) + print("_" * len(bugs)) + + for bug in bugs: + _res = api_client.post_bug(bug) + + if CLI_CONFIG.debug: # pragma: no cover + print(_res.json()) + + # Distinguish bug which are unique vs. 
duplicates (someone else submitted) + if _res.status_code == 201: + sql_nt("UPDATE bug SET submitted = 1 WHERE id = ?", (bug["id"],)) + elif _res.status_code == 204: + sql_nt("UPDATE bug SET submitted = 2 WHERE id = ?", (bug["id"],)) + else: # pragma: no cover + print("WARN: unknown status [{0}]".format(_res.status_code)) + continue + + print(".", end="", flush=True) + n_submitted += 1 + + print("submitted: {0} bugs".format(n_submitted)) + return n_submitted diff --git a/ntclient/services/calculate.py b/ntclient/services/calculate.py index 8fc93228..4a387ca5 100644 --- a/ntclient/services/calculate.py +++ b/ntclient/services/calculate.py @@ -8,7 +8,10 @@ """ import argparse import math +from collections import OrderedDict +from typing import Mapping +from ntclient import NUTR_ID_KCAL from ntclient.utils import Gender # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -112,7 +115,7 @@ def orm_dos_remedios(weight: float, reps: int) -> dict: } # Compute the 1-rep max - # NOTE: this should be guaranteed by arg-parse to be an integer, and 0 < n <= 20 + # NOTE: this should be guaranteed by arg-parse to be an integer, and 1 ≤ n ≤ 20 one_rm = round( weight / _max_rep_ratios[reps], 1, @@ -511,3 +514,79 @@ def lbl_casey_butt(height: float, args: argparse.Namespace) -> tuple: # calf round(0.9812 * ankle + 0.1250 * height, 2), ) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Nutrient Aggregation +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +def calculate_nutrient_totals( + food_data: Mapping[int, float], foods_analysis: Mapping[int, list] +) -> tuple[OrderedDict, float]: + """ + Common logic to aggregate nutrient data for a list of foods. + + @param food_data: dict of {food_id: grams, ...} + @param foods_analysis: dict of {food_id: [(nutr_id, val_per_100g), ...], ...} + @return: (nutrient_totals, total_grams) + """ + nutrient_totals = OrderedDict() + total_grams = 0.0 + + for food_id, grams in food_data.items(): + total_grams += grams + if food_id not in foods_analysis: + continue + for _nutrient in foods_analysis[food_id]: + nutr_id = _nutrient[0] + nutr_per_100g = _nutrient[1] + nutr_val = grams / 100 * nutr_per_100g + if nutr_id not in nutrient_totals: + nutrient_totals[nutr_id] = nutr_val + else: + nutrient_totals[nutr_id] += nutr_val + + return nutrient_totals, total_grams + + +def calculate_scaling_multiplier( + scale: float, + scale_mode: str, + analysis: Mapping, + nutrients: Mapping, + total_weight: float, +) -> float: + """ + Determine the multiplier needed to scale the analysis values. + """ + multiplier = 1.0 + if not scale: + return multiplier + if scale_mode == "kcal": + current_val = analysis.get(NUTR_ID_KCAL, 0) + multiplier = scale / current_val if current_val else 0 + elif scale_mode == "weight": + multiplier = scale / total_weight if total_weight else 0 + else: + # Try to interpret scale_mode as nutrient ID or Name + target_id = None + # 1. Check if int + try: + target_id = int(scale_mode) + except ValueError: + # 2. Check names + for n_id, n_data in nutrients.items(): + # n_data usually: (id, rda, unit, tag, name, ...) 
+ if scale_mode.lower() in str(n_data[3]).lower(): + target_id = n_id + break + if scale_mode.lower() in str(n_data[4]).lower(): + target_id = n_id + break + + if target_id and target_id in analysis: + current_val = analysis[target_id] + multiplier = scale / current_val if current_val else 0 + else: + print(f"WARN: Could not scale by '{scale_mode}', nutrient not found.") + + return multiplier diff --git a/ntclient/services/logs.py b/ntclient/services/logs.py new file mode 100644 index 00000000..58a2a67d --- /dev/null +++ b/ntclient/services/logs.py @@ -0,0 +1,95 @@ +""" +Logs Service +Business logic for managing daily food logs. +""" + +import datetime +import os +from typing import Optional + +from tabulate import tabulate + +from ntclient import NUTRA_HOME +from ntclient.persistence.csv_manager import append_to_log, read_log +from ntclient.persistence.sql.usda.funcs import sql_food_details +from ntclient.services.analyze import day_analyze + + +def get_log_path(date_str: Optional[str] = None) -> str: + """ + Returns the absolute path to the log file for the given date. + Defaults to today's date if date_str is None. + Expected date format: YYYY-MM-DD (or similar valid filename) + """ + if not date_str: + date_str = datetime.date.today().isoformat() + + # Sanitize inputs strictly if necessary, but assuming basic CLI usage for now + filename = f"{date_str}.csv" + return os.path.join(NUTRA_HOME, filename) + + +def log_add(food_id: int, grams: float, date_str: Optional[str] = None) -> None: + """ + Adds a food entry to the recurring daily log. + Validates that the food_id exists in the USDA database. + """ + # Validate Food ID + food_details = sql_food_details({food_id}) + if not food_details: + print(f"ERROR: Food ID {food_id} not found in database.") + return + + log_path = get_log_path(date_str) + append_to_log(log_path, food_id, grams) + + # Feedback + food_name = food_details[0][2] + # Truncate + if len(food_name) > 40: + food_name = food_name[:37] + "..." + print( + f"Added: {grams}g of '{food_name}' ({food_id}) to {os.path.basename(log_path)}" + ) + + +def log_view(date_str: Optional[str] = None) -> None: + """ + Views the raw entries of a log file. + """ + log_path = get_log_path(date_str) + entries = read_log(log_path) + + if not entries: + print(f"No log entries found for {os.path.basename(log_path)}") + return + + # Enrich with food names for display + # entries is list of dicts like {'id': '1001', 'grams': '100'} + food_ids = {int(e["id"]) for e in entries if e["id"]} + food_des = {x[0]: x[2] for x in sql_food_details(food_ids)} + + table_data = [] + for e in entries: + fid = int(e["id"]) + grams = float(e["grams"]) + name = food_des.get(fid, "Unknown Food") + if len(name) > 50: + name = name[:47] + "..." + table_data.append([fid, name, grams]) + + print(f"\nLog: {os.path.basename(log_path)}") + print(tabulate(table_data, headers=["ID", "Food", "Grams"], tablefmt="simple")) + + +def log_analyze(date_str: Optional[str] = None) -> None: + """ + Runs full analysis on the log file. 
+    """
+    log_path = get_log_path(date_str)
+    if not os.path.exists(log_path):
+        print(f"Log file not found: {log_path}")
+        return
+
+    # Reuse existing analysis logic
+    day_analyze([log_path])
diff --git a/ntclient/services/recipe/utils.py b/ntclient/services/recipe/recipe.py
similarity index 86%
rename from ntclient/services/recipe/utils.py
rename to ntclient/services/recipe/recipe.py
index 63dbafc0..6f8e0fd8 100644
--- a/ntclient/services/recipe/utils.py
+++ b/ntclient/services/recipe/recipe.py
@@ -52,23 +52,27 @@ def recipes_overview() -> tuple:
     try:
         csv_utils.csv_recipe_print_tree()
         return 0, None
-    except FileNotFoundError:
+    except FileNotFoundError:  # pragma: no cover
         print("WARN: no recipes found, create some or run: nutra recipe init")
         return 1, None
-def recipe_overview(recipe_path: str) -> tuple:
+def recipe_overview(
+    recipe_path: str, scale: float = 0, scale_mode: str = "kcal"
+) -> tuple:
     """
     Shows single recipe overview
     @param recipe_path: full path on disk
+    @param scale: optional target value to scale to
+    @param scale_mode: mode for scaling (kcal, weight, nutrient)
     @return: (exit_code: int, None)
     """
     try:
         _recipe = Recipe(recipe_path)
         _recipe.process_data()
-        # TODO: extract relevant bits off, process, use nutprogbar (e.g. day analysis)
+        _recipe.print_analysis(scale=scale, scale_mode=scale_mode)
         return 0, _recipe
     except (FileNotFoundError, IndexError) as err:
         print("ERROR: %s" % repr(err))
diff --git a/ntclient/services/usda.py b/ntclient/services/usda.py
index b6face29..7244f81c 100644
--- a/ntclient/services/usda.py
+++ b/ntclient/services/usda.py
@@ -30,7 +30,7 @@
 def list_nutrients() -> tuple:
     """Lists out nutrients with basic details"""
-    headers, nutrients = sql_nutrients_details()
+    nutrients, headers = sql_nutrients_details()
     # TODO: include in SQL table cache?
headers.append("avg_rda") nutrients = [list(x) for x in nutrients] @@ -58,10 +58,11 @@ def sort_foods( nutrient_id: int, by_kcal: bool, limit: int = DEFAULT_RESULT_LIMIT ) -> tuple: """Sort, by nutrient, either (amount / 100 g) or (amount / 200 kcal)""" + # pylint: disable=too-many-locals # TODO: sub shrt_desc for long if available, and support config.FOOD_NAME_TRUNC - def print_results(_results: list, _nutrient_id: int) -> list: + def print_results(_results: list, _nutrient_id: int) -> None: """Prints truncated list for sort""" nutrients = sql_nutrients_overview() nutrient = nutrients[_nutrient_id] @@ -72,7 +73,6 @@ def print_results(_results: list, _nutrient_id: int) -> list: table = tabulate(_results, headers=headers, tablefmt="simple") print(table) - return _results # Gets values for nutrient_id and kcal=208 nut_data = sql_sort_helper1(nutrient_id) @@ -129,6 +129,7 @@ def print_results(_results: list, _nutrient_id: int) -> list: ################################################################################ def search(words: list, fdgrp_id: int = 0, limit: int = DEFAULT_RESULT_LIMIT) -> tuple: """Searches foods for input""" + # pylint: disable=too-many-locals def tabulate_search(_results: list) -> list: """Makes search results more readable""" diff --git a/ntclient/utils/__init__.py b/ntclient/utils/__init__.py index 042f66f2..9f54cb3a 100644 --- a/ntclient/utils/__init__.py +++ b/ntclient/utils/__init__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Mar 26 23:07:30 2023 diff --git a/ntclient/utils/sql.py b/ntclient/utils/sql.py new file mode 100644 index 00000000..f5c81da6 --- /dev/null +++ b/ntclient/utils/sql.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Feb 13 14:15:21 2024 + +@author: shane +""" +from ntclient.services.bugs import insert as bug_insert +from ntclient.utils import CLI_CONFIG + + +def handle_runtime_exception(args: list, exception: Exception) -> None: + """ + Handles exceptions raised during runtime. 
+ """ + print("ERROR: Exception: %s" % exception) + bug_insert(args, exception) + if CLI_CONFIG.debug: + raise exception diff --git a/ntclient/utils/tree.py b/ntclient/utils/tree.py index 9a50a7e2..de9568b5 100644 --- a/ntclient/utils/tree.py +++ b/ntclient/utils/tree.py @@ -26,7 +26,7 @@ def colorize(path: str, full: bool = False) -> str: file = path if full else os.path.basename(path) if os.path.islink(path): - return "".join( + return "".join( # pragma: no cover [ COLOR_LINK, file, @@ -39,7 +39,7 @@ def colorize(path: str, full: bool = False) -> str: if os.path.isdir(path): return "".join([COLOR_DIR, file, colors.STYLE_RESET_ALL]) - if os.access(path, os.X_OK): + if os.access(path, os.X_OK): # pragma: no cover return "".join([COLOR_EXEC, file, colors.STYLE_RESET_ALL]) return file @@ -68,11 +68,11 @@ def print_dir(_dir: str, pre: str = str()) -> tuple: dir_len = len(os.listdir(_dir)) - 1 for i, file in enumerate(sorted(os.listdir(_dir), key=str.lower)): path = os.path.join(_dir, file) - if file.startswith(".") and not SHOW_HIDDEN: + if file.startswith(".") and not SHOW_HIDDEN: # pragma: no cover continue if os.path.isdir(path): print(pre + strs[2 if i == dir_len else 1] + colorize(path)) - if os.path.islink(path): + if os.path.islink(path): # pragma: no cover n_dirs += 1 else: n_d, n_f, n_s = print_dir(path, pre + strs[3 if i == dir_len else 0]) diff --git a/nutra b/nutra index ff1e9da0..8923964c 100755 --- a/nutra +++ b/nutra @@ -6,7 +6,6 @@ Created on Fri Sep 28 22:25:38 2018 @author: shane """ - import sys from ntclient.__main__ import main diff --git a/requirements-lint.txt b/requirements-lint.txt index b8e097c8..77f44ae0 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1,10 +1,11 @@ -bandit==1.7.5 +bandit==1.7.8 black==24.4.0 doc8==1.1.1 -flake8==6.0.0 -mypy==1.1.1 -pylint==2.17.1 -types-colorama==0.4.15.11 -types-psycopg2==2.9.21.9 -types-setuptools==67.6.0.6 -types-tabulate==0.9.0.2 +flake8==7.0.0 +mypy==1.9.0 +pylint==3.1.0 +types-colorama==0.4.15.20240311 +types-psycopg2==2.9.21.20240417 +types-requests==2.31.0.20240406 +types-setuptools==69.5.0.20240415 +types-tabulate==0.9.0.20240106 diff --git a/requirements-optional.txt b/requirements-optional.txt index 453c47bd..8c7b966e 100644 --- a/requirements-optional.txt +++ b/requirements-optional.txt @@ -1 +1 @@ -python-Levenshtein<=0.12.2 +Levenshtein diff --git a/requirements-test.txt b/requirements-test.txt index 86c8192b..153ebb82 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,2 +1,4 @@ +# TODO: test upgrading these; verify they work on older OSes/Python versions coverage>=6.2 pytest>=7.0.1 +requests-mock>=1.12.1 diff --git a/requirements.txt b/requirements.txt index 6a6c215d..c2010f58 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ argcomplete>=1.8.2,<=1.12.3 colorama>=0.1.17,<=0.4.1 fuzzywuzzy>=0.3.0 +requests>=2.0.0 tabulate>=0.4.3,<=0.8.9 diff --git a/scripts/n b/scripts/n index a1b3619e..eec82faf 100755 --- a/scripts/n +++ b/scripts/n @@ -1,7 +1,13 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # PYTHON_ARGCOMPLETE_OK -"""Executable script, copied over by pip""" +""" +Created on Fri Sep 28 22:25:38 2018 + +Executable script, copied over by pip. 
+ +@author: shane +""" import re import sys diff --git a/setup.cfg b/setup.cfg index 377db12d..fb8bbc16 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,14 +5,14 @@ testpaths = [coverage:run] # See: https://coverage.readthedocs.io/en/7.2.2/config.html#run -command_line = -m pytest +command_line = -m pytest -svv source = ntclient [coverage:report] fail_under = 90.00 precision = 2 -show_missing = True +; show_missing = True skip_empty = True skip_covered = True diff --git a/tests/__init__.py b/tests/__init__.py index e69de29b..48895094 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Apr 12 16:51:14 2024 + +@author: shane +""" + +# TODO: attach some env props to it, and re-instantiate a CliConfig() class. +# We're just setting it on the shell, as an env var, before running tests in CI. +# e.g. the equivalent of putting this early in the __init__ file; +# os.environ["NUTRA_HOME"] = os.path.join(TEST_HOME, ".nutra.test") +# ... +# handle setting up the usda.sqlite3 and nt.sqlite3 files in the test home dir. +# This will allow us to test the persistence layer, and the API layer, in isolation. diff --git a/tests/aaa/__init__.py b/tests/aaa/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/aaa/test_init.py b/tests/aaa/test_init.py new file mode 100644 index 00000000..bfab1e36 --- /dev/null +++ b/tests/aaa/test_init.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Feb 25 16:43:56 2024 + +@author: shane + +NOTE: these tests are in a folder "aaa\" which is alphabetically RUN FIRST. + Other tests, such as test_bug, depend on having the newer version of nt.sqlite3 +""" +from unittest.mock import patch + +from ntclient.services import init + + +def test_init() -> None: + """Tests the SQL/persistence init in real time""" + with patch("os.path.isdir", return_value=False): + with patch("os.makedirs", return_value=None): + code, result = init(yes=True) + + assert code == 0 + assert result diff --git a/tests/persistence/__init__.py b/tests/persistence/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/persistence/test_sql.py b/tests/persistence/test_sql.py new file mode 100644 index 00000000..7f51afff --- /dev/null +++ b/tests/persistence/test_sql.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Apr 12 18:22:39 2024 + +@author: shane +""" +import pytest + +from ntclient.persistence.sql import _prep_query +from ntclient.persistence.sql.nt import nt_sqlite_connect + + +def test_prep_query_with_non_iterative_values_throws_type_error() -> None: + """Test the _prep_query method if a bare (non-iterative) values is passed in.""" + + con = nt_sqlite_connect() + query = "SELECT * FROM version WHERE id = ?;" + db_name = "nt" + values = 1 + + with pytest.raises(TypeError): + _prep_query(con, query, db_name, values) # type: ignore diff --git a/tests/resources/prefs.json b/tests/resources/prefs.json deleted file mode 100644 index 27b89209..00000000 --- a/tests/resources/prefs.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "current_user": 1 -} diff --git a/tests/services/test_api.py b/tests/services/test_api.py new file mode 100644 index 00000000..73bede9e --- /dev/null +++ b/tests/services/test_api.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Apr 12 16:14:03 2024 + +@author: shane +""" +import unittest +from unittest.mock import patch + +import pytest +import requests_mock as r_mock + +from ntclient.services.api import URLS_API, ApiClient, cache_mirrors + 
+if __name__ == "__main__": + pytest.main() + + +def test_cache_mirrors(requests_mock: r_mock.Mocker) -> None: + """Test cache_mirrors""" + for url in URLS_API: + requests_mock.get(url, status_code=200) + assert cache_mirrors() == "https://api.nutra.tk" + + +def test_cache_mirrors_failing_mirrors_return_empty_string( + requests_mock: r_mock.Mocker, +) -> None: + """Test when cache_mirrors are all down, return empty string.""" + for url in URLS_API: + requests_mock.get(url, status_code=503) + assert cache_mirrors() == str() + + +class TestApiClient(unittest.TestCase): + """Test the ApiClient class.""" + + with patch( + "ntclient.services.api.cache_mirrors", return_value="https://api.nutra.tk" + ): + api_client = ApiClient() + + def test_post(self) -> None: + """Test the post method.""" + with r_mock.Mocker() as m: + m.post("https://api.nutra.tk/test-endpoint", json={}) + res = TestApiClient.api_client.post("test-endpoint", {}) + assert res + + def test_post_bug(self) -> None: + """Test the post_bug method.""" + with r_mock.Mocker() as m: + m.post("https://api.nutra.tk/bug", json={}) + res = TestApiClient.api_client.post_bug({}) + assert res diff --git a/tests/services/test_bug.py b/tests/services/test_bug.py new file mode 100644 index 00000000..90a222ff --- /dev/null +++ b/tests/services/test_bug.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Feb 25 16:18:08 2024 + +@author: shane +""" +import unittest +from unittest.mock import MagicMock, patch + +import pytest + +from ntclient.__main__ import main +from ntclient.services import bugs + + +class TestBug(unittest.TestCase): + """Tests the bug service""" + + def test_bug_simulate(self) -> None: + """Tests the functions for simulating a bug""" + with pytest.raises(NotImplementedError): + main(args=["--debug", "bug", "simulate"]) + + def test_bug_list(self) -> None: + """Tests the functions for listing bugs""" + exit_code, _bugs = bugs.list_bugs(show_all=True) + + assert exit_code == 0 + assert len(_bugs) >= 0 + # assert len(rows) >= 0 + # assert len(headers) == 11 + + def test_bug_list_unsubmitted(self) -> None: + """Tests the functions for listing unsubmitted bugs""" + with patch( + "ntclient.services.bugs._list_bugs", + return_value=[{"submitted": False}], + ): + exit_code, _bugs = bugs.list_bugs(show_all=False) + + assert exit_code == 0 + assert len(_bugs) == 1 + _bug = _bugs[0] + assert len(_bug.values()) >= 0 + assert len(_bug.keys()) == 1 + + @patch("ntclient.services.api.cache_mirrors", return_value="https://someurl.com") + @patch( + "ntclient.services.api.ApiClient.post", + return_value=MagicMock(status_code=201), + ) + @patch("ntclient.services.bugs.sql_nt", return_value=([], [], [], [])) + # pylint: disable=unused-argument + def test_bug_report(self, *args: MagicMock) -> None: + """Tests the functions for submitting bugs""" + result = bugs.submit_bugs() + assert isinstance(result, int) + + @patch("ntclient.services.api.cache_mirrors", return_value="https://someurl.com") + @patch( + "ntclient.services.api.ApiClient.post", + return_value=MagicMock(status_code=201), + ) + @patch("ntclient.services.bugs._list_bugs_unsubmitted", return_value=[{"id": 1}]) + @patch("ntclient.services.bugs.sql_nt") + # pylint: disable=unused-argument + def test_bug_report_with_unsubmitted(self, *args: MagicMock) -> None: + """Tests the functions for submitting bugs""" + result = bugs.submit_bugs() + assert isinstance(result, int) + + @patch("ntclient.services.api.cache_mirrors", return_value="https://someurl.com") + @patch( + 
"ntclient.services.api.ApiClient.post", + return_value=MagicMock(status_code=204), + ) + @patch("ntclient.services.bugs._list_bugs_unsubmitted", return_value=[{"id": 1}]) + @patch("ntclient.services.bugs.sql_nt") + # pylint: disable=unused-argument + def test_bug_report_on_204_status(self, *args: MagicMock) -> None: + """Tests the functions for submitting bugs""" + result = bugs.submit_bugs() + assert result == 1 + + @patch("ntclient.services.api.cache_mirrors", return_value="https://someurl.com") + @patch( + "ntclient.services.api.ApiClient.post", + return_value=MagicMock(status_code=201), + ) + @patch("ntclient.services.bugs._list_bugs_unsubmitted", return_value=[]) + # pylint: disable=unused-argument + def test_bug_report_empty_list(self, *args: MagicMock) -> None: + """Tests the functions for submitting bugs""" + result = bugs.submit_bugs() + assert result == 0 diff --git a/tests/services/test_logs.py b/tests/services/test_logs.py new file mode 100644 index 00000000..ece5a2c8 --- /dev/null +++ b/tests/services/test_logs.py @@ -0,0 +1,80 @@ +""" +Tests for log service. +""" + +import os +import shutil +import tempfile +import unittest +from unittest.mock import patch + +from ntclient.services.logs import log_add, log_analyze, log_view + + +class TestLogs(unittest.TestCase): + """Test class for log service""" + + def setUp(self): + """Setup temp dir""" + self.test_dir = tempfile.mkdtemp() + self.patcher = patch("ntclient.services.logs.NUTRA_HOME", self.test_dir) + self.mock_home = self.patcher.start() + + def tearDown(self): + """Cleanup""" + self.patcher.stop() + shutil.rmtree(self.test_dir) + + @patch("ntclient.services.logs.sql_food_details") + def test_log_add(self, mock_sql): + """Test adding to log""" + # Mock food exists + mock_sql.return_value = [ + (1001, 100, "Test Food", "", "", "", "", "", 0, "", 0, 0, 0, 0) + ] + + log_add(1001, 150.0, "2099-01-01") + + log_path = os.path.join(self.test_dir, "2099-01-01.csv") + self.assertTrue(os.path.exists(log_path)) + with open(log_path, "r", encoding="utf-8") as f: + content = f.read() + self.assertIn("1001,150.0", content) + + @patch("ntclient.services.logs.sql_food_details") + def test_log_add_invalid_food(self, mock_sql): + """Test adding invalid food""" + mock_sql.return_value = [] # Food not found + + # Should print error and not create file (or not append) + # Using print capture could be added, but for now check file state + log_add(9999, 150.0, "2099-01-02") + + log_path = os.path.join(self.test_dir, "2099-01-02.csv") + self.assertFalse(os.path.exists(log_path)) + + @patch("ntclient.services.logs.sql_food_details") + @patch("ntclient.services.logs.read_log") + def test_log_view(self, mock_read, mock_sql): + """Test viewing log""" + mock_read.return_value = [{"id": "1001", "grams": "150.0"}] + # Mock needs 3 elements: id, ..., name + mock_sql.return_value = [(1001, 100, "Test Food")] + + # Just ensure no exception + log_view("2099-01-01") + + @patch("ntclient.services.logs.day_analyze") + def test_log_analyze(self, mock_analyze): + """Test analyzing log""" + # Create dummy log + log_path = os.path.join(self.test_dir, "2099-01-01.csv") + with open(log_path, "w", encoding="utf-8") as f: + f.write("id,grams\n1001,100") + + log_analyze("2099-01-01") + mock_analyze.assert_called_once() + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/services/test_recipe.py b/tests/services/test_recipe.py index f7985f50..b3fe9198 100644 --- a/tests/services/test_recipe.py +++ b/tests/services/test_recipe.py @@ -6,11 +6,13 @@ """ 
import os import unittest +from unittest.mock import patch import pytest -import ntclient.services.recipe.utils as r -from ntclient.services.recipe import RECIPE_STOCK +import ntclient.services.recipe.recipe as r +from ntclient.models import Recipe +from ntclient.services.recipe import RECIPE_STOCK, csv_utils class TestRecipe(unittest.TestCase): @@ -31,14 +33,23 @@ def test_recipes_overview(self): exit_code, _ = r.recipes_overview() assert exit_code == 0 - @unittest.expectedFailure - @pytest.mark.xfail(reason="Due to a wip refactor") - def test_recipe_overview_throws_exc_for_nonexistent_path(self): - """Raises index error if recipe int id is invalid""" + def test_recipe_process_data_multiple_recipe_uuids_throws_key_error(self): + """Raises key error if recipe uuids are not unique""" + # TODO: this should be a custom exception, i.e. RecipeValidationException + with patch( + "ntclient.models.Recipe._aggregate_rows", + return_value=[{"recipe_id": "UUID_1"}, {"recipe_id": "UUID_2"}], + ): + with pytest.raises(KeyError): + recipe = Recipe("FAKE-PATH") + recipe.process_data() + + def test_recipe_overview_returns_exit_code_1_for_nonexistent_path(self): + """Returns (1, None) if recipe path is invalid""" # TODO: should we be using guid / uuid instead of integer id? - with pytest.raises(IndexError): - r.recipe_overview("-12345-FAKE-PATH-") + result = r.recipe_overview("-12345-FAKE-PATH-") + assert (1, None) == result def test_recipe_overview_might_succeed_for_maybe_existing_id(self): """Tries 'check for existing ID', but only can if the user initialized""" @@ -46,3 +57,15 @@ def test_recipe_overview_might_succeed_for_maybe_existing_id(self): os.path.join(RECIPE_STOCK, "dinner", "burrito-bowl.csv") ) assert exit_code in {0, 1} + + def test_recipe_csv_utils(self): + """Test the (largely unused) CSV utils module""" + _csv_files = csv_utils.csv_files() + assert _csv_files + + _csv_recipes = csv_utils.csv_recipes() + assert _csv_recipes + + # sanity executions + csv_utils.csv_recipe_print_tree() + csv_utils.csv_print_details() diff --git a/tests/test_cli.py b/tests/test_cli.py index 42d8c7ea..cee2f3bf 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -6,10 +6,13 @@ Created on Fri Jan 31 15:19:53 2020 @author: shane +@TODO: split this up... mock out argparser tests; then test missing service lines """ +import datetime import os import sys import unittest +from unittest.mock import patch import pytest @@ -38,30 +41,18 @@ arg_parser = build_arg_parser() -# TODO: attach some env props to it, and re-instantiate a CliConfig() class. -# We're just setting it on the shell, as an env var, before running tests in CI. -# e.g. the equivalent of putting this early in the __init__ file; -# os.environ["NUTRA_HOME"] = os.path.join(TEST_HOME, ".nutra.test") - - class TestCli(unittest.TestCase): """ Original one-stop-shop for testing. @todo: integration tests.. create user, recipe, log.. 
analyze & compare """ - def test_000_init(self): - """Tests the SQL/persistence init in real time""" - code, result = init(yes=True) - assert code == 0 - assert result - def test_100_usda_sql_funcs(self): """Performs cursory inspection (sanity checks) of usda.sqlite3 image""" version = usda_ver() assert version == __db_target_usda__ - result = usda_funcs.sql_nutrients_details() - assert len(result[1]) == 186 + rows, _ = usda_funcs.sql_nutrients_details() + assert len(rows) == 186 result = usda_funcs.sql_servings({9050, 9052}) assert len(result) == 3 @@ -91,7 +82,7 @@ def test_200_nt_sql_funcs(self): def test_300_argparser_debug_no_paging(self): """Verifies the debug and no_paging flags are set""" - args = arg_parser.parse_args(args=["-d", "--no-pager"]) + args = arg_parser.parse_args(args=["--debug", "--no-pager"]) CLI_CONFIG.set_flags(args) assert args.debug is True @@ -247,7 +238,7 @@ def test_410_nt_argparser_funcs(self): assert result["navy"] == 10.64 # Invalid (failed Navy) - args = arg_parser.parse_args(args="-d calc bf -w 80 -n 40".split()) + args = arg_parser.parse_args(args="--debug calc bf -w 80 -n 40".split()) CLI_CONFIG.set_flags(args) code, result = args.func(args) assert code in {0, 1} # Might be a failed code one day, but returns 0 for now @@ -290,6 +281,24 @@ def test_410_nt_argparser_funcs(self): 17.11, ) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Bug + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + args = arg_parser.parse_args(args="bug".split()) + code, result = args.func(args) + assert code == 0 + assert isinstance(result, list) + args = arg_parser.parse_args(args="bug --show".split()) + code, result = args.func(args) + assert code == 0 + assert isinstance(result, list) + + args = arg_parser.parse_args(args="bug report".split()) + with patch("ntclient.services.bugs.submit_bugs", return_value=1): + code, result = args.func(args) + assert code == 0 + assert result == 1 + def test_415_invalid_path_day_throws_error(self): """Ensures invalid path throws exception in `day` subcommand""" invalid_day_csv_path = os.path.join( @@ -323,8 +332,8 @@ def test_500_main_module(self): nt_main(args=["-h"]) assert system_exit.value.code == 0 - # -d - code = nt_main(args=["-d"]) + # --debug + code = nt_main(args=["--debug"]) assert code == 0 # __main__: if args_dict @@ -342,7 +351,7 @@ def test_600_sql_integrity_error__service_wip(self): # TODO: replace with non-biometric test # from ntclient.services import biometrics # - # args = arg_parser.parse_args(args=["-d", "bio", "log", "add", "12,12"]) + # args = arg_parser.parse_args(args=["--debug", "bio", "log", "add", "12,12"]) # biometrics.input = ( # lambda x: "y" # ) # mocks input, could also pass `-y` flag or set yes=True @@ -375,8 +384,8 @@ def test_800_usda_upgrades_or_downgrades(self): new_release = str(int(release) + 1) new_version = ".".join([major, minor, new_release]) _usda_sql( - "INSERT INTO version (version) VALUES (?)", - values=(new_version,), + "INSERT INTO version (version, created) VALUES (?,?)", + values=(new_version, datetime.datetime.utcnow()), version_check=False, ) @@ -395,7 +404,7 @@ def test_801_sql_invalid_version_error_if_version_old(self): ) with pytest.raises(SqlInvalidVersionError) as sql_invalid_version_error: - nt_main(["-d", "nt"]) + nt_main(["--debug", "nt"]) assert sql_invalid_version_error is not None @unittest.skip(reason="Long-running test, want to replace with more 'unit' style") @@ -414,14 +423,14 @@ def test_802_usda_downloads_fresh_if_missing_or_deleted(self): # TODO: resolve PermissionError on Windows 
print(repr(err)) _usda_sql( - "INSERT INTO version (version) VALUES (?)", - values=(__db_target_usda__,), + "INSERT INTO version (version, created) VALUES (?,?)", + values=(__db_target_usda__, datetime.datetime.utcnow()), version_check=False, ) pytest.xfail("PermissionError, are you using Microsoft Windows?") # mocks input, could also pass `-y` flag or set yes=True - usda.input = lambda x: "y" # pylint: disable=redefined-builtin + setattr(usda, "input", lambda x: "y") code, successful = init() assert code == 0 @@ -431,7 +440,7 @@ def test_900_nut_rda_bar(self): """Verifies colored/visual output is successfully generated""" analysis = usda_funcs.sql_analyze_foods(food_ids={1001}) nutrients = usda_funcs.sql_nutrients_overview() - output = nutprogbar.nutprogbar( - food_amts={1001: 100}, food_analyses=analysis, nutrients=nutrients + output = nutprogbar.nutrient_progress_bars( + _food_amts={1001: 100}, _food_analyses=analysis, _nutrients=nutrients ) assert output diff --git a/tests/test_init.py b/tests/test_init.py new file mode 100644 index 00000000..97c01805 --- /dev/null +++ b/tests/test_init.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Apr 12 17:30:01 2024 + +@author: shane +""" +from unittest.mock import patch + +import pytest + +from ntclient import version_check + + +def test_version_check_archaic_python_version_raises_runtime_error() -> None: + """Test that the correct error is raised when the Python version is too low.""" + + with patch("sys.version_info", (3, 4, 0)): + with pytest.raises(RuntimeError) as exc_info: + version_check() + + assert exc_info.type == RuntimeError + assert exc_info.value.args == ( + "ERROR: nutra requires Python 3.4.3 or later to run", + "HINT: You're running Python 3.4.0", + )
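
The new helpers in ntclient/services/calculate.py (calculate_nutrient_totals and
calculate_scaling_multiplier) are easiest to see end-to-end in a small, self-contained
sketch. The snippet below is illustrative only and is not part of the patch: the food
IDs and per-100 g values are invented, and the empty nutrients mapping is passed only
because it is not consulted when scaling by kcal.

    from ntclient import NUTR_ID_KCAL
    from ntclient.services.calculate import (
        calculate_nutrient_totals,
        calculate_scaling_multiplier,
    )

    # {food_id: grams eaten} -- example values, invented for illustration
    food_data = {1001: 150.0, 9050: 80.0}

    # {food_id: [(nutr_id, value per 100 g), ...]} -- same per-food shape that
    # day_analyze feeds into the helper
    foods_analysis = {
        1001: [(NUTR_ID_KCAL, 717.0)],
        9050: [(NUTR_ID_KCAL, 57.0)],
    }

    # Aggregate the day: per-nutrient totals plus total grams consumed
    totals, total_grams = calculate_nutrient_totals(food_data, foods_analysis)

    # Multiplier that normalizes the day to exactly 2000 kcal
    multiplier = calculate_scaling_multiplier(
        scale=2000,
        scale_mode="kcal",
        analysis=totals,
        nutrients={},
        total_weight=total_grams,
    )
    scaled_totals = {nutr_id: val * multiplier for nutr_id, val in totals.items()}
    print(round(scaled_totals[NUTR_ID_KCAL]))  # 2000

The same multiplier is what day_format applies before printing, so scaling by "weight"
or by a named nutrient follows the identical path with a different scale_mode.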