From dd4fe10efd8f0db0f6ed7ff3c2e350410ab7713e Mon Sep 17 00:00:00 2001 From: root Date: Fri, 12 Dec 2025 17:02:00 +0000 Subject: [PATCH 01/12] first partial commit, unbuilt yet --- CMakeLists.txt | 63 +-- DataModel/DataModel.h | 240 +++++++-- DataModel/ManagedSocket.h | 13 + DataModel/QueryBatch.h | 58 +++ DataModel/ZmqQuery.h | 72 +++ DataModel/query_typedefs.h | 9 + Makefile | 174 +++---- Setup.sh | 6 +- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 480 ++++++++++++++++++ UserTools/DatabaseWorkers/DatabaseWorkers.h | 67 +++ UserTools/DatabaseWorkers/README.md | 19 + UserTools/Factory/Factory.cpp | 12 + UserTools/JobManager/JobManager.cpp | 71 +++ UserTools/JobManager/JobManager.h | 39 ++ UserTools/JobManager/README.md | 19 + UserTools/Monitoring/Monitoring.cpp | 241 +++++++++ UserTools/Monitoring/Monitoring.h | 44 ++ UserTools/Monitoring/README.md | 19 + .../MulticastReceiverSender.cpp | 331 ++++++++++++ .../MulticastReceiverSender.h | 68 +++ UserTools/MulticastReceiverSender/README.md | 19 + .../MulticastWorkers/MulticastWorkers.cpp | 390 ++++++++++++++ UserTools/MulticastWorkers/MulticastWorkers.h | 62 +++ UserTools/MulticastWorkers/README.md | 19 + .../ReadQueryReceiverReplySender/README.md | 19 + .../ReadQueryReceiverReplySender.cpp | 326 ++++++++++++ .../ReadQueryReceiverReplySender.h | 63 +++ UserTools/ResultWorkers/README.md | 19 + UserTools/ResultWorkers/ResultWorkers.cpp | 277 ++++++++++ UserTools/ResultWorkers/ResultWorkers.h | 58 +++ UserTools/SocketManager/README.md | 19 + UserTools/SocketManager/SocketManager.cpp | 107 ++++ UserTools/SocketManager/SocketManager.h | 44 ++ UserTools/Unity.h | 12 + UserTools/WriteQueryReceiver/README.md | 19 + .../WriteQueryReceiver/WriteQueryReceiver.cpp | 244 +++++++++ .../WriteQueryReceiver/WriteQueryReceiver.h | 62 +++ UserTools/WriteWorkers/README.md | 19 + UserTools/WriteWorkers/WriteWorkers.cpp | 195 +++++++ UserTools/WriteWorkers/WriteWorkers.h | 56 ++ 40 files changed, 3885 insertions(+), 189 deletions(-) 
create mode 100644 DataModel/ManagedSocket.h create mode 100644 DataModel/QueryBatch.h create mode 100644 DataModel/ZmqQuery.h create mode 100644 DataModel/query_typedefs.h create mode 100644 UserTools/DatabaseWorkers/DatabaseWorkers.cpp create mode 100644 UserTools/DatabaseWorkers/DatabaseWorkers.h create mode 100644 UserTools/DatabaseWorkers/README.md create mode 100644 UserTools/JobManager/JobManager.cpp create mode 100644 UserTools/JobManager/JobManager.h create mode 100644 UserTools/JobManager/README.md create mode 100644 UserTools/Monitoring/Monitoring.cpp create mode 100644 UserTools/Monitoring/Monitoring.h create mode 100644 UserTools/Monitoring/README.md create mode 100644 UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp create mode 100644 UserTools/MulticastReceiverSender/MulticastReceiverSender.h create mode 100644 UserTools/MulticastReceiverSender/README.md create mode 100644 UserTools/MulticastWorkers/MulticastWorkers.cpp create mode 100644 UserTools/MulticastWorkers/MulticastWorkers.h create mode 100644 UserTools/MulticastWorkers/README.md create mode 100644 UserTools/ReadQueryReceiverReplySender/README.md create mode 100644 UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp create mode 100644 UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h create mode 100644 UserTools/ResultWorkers/README.md create mode 100644 UserTools/ResultWorkers/ResultWorkers.cpp create mode 100644 UserTools/ResultWorkers/ResultWorkers.h create mode 100644 UserTools/SocketManager/README.md create mode 100644 UserTools/SocketManager/SocketManager.cpp create mode 100644 UserTools/SocketManager/SocketManager.h create mode 100644 UserTools/WriteQueryReceiver/README.md create mode 100644 UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp create mode 100644 UserTools/WriteQueryReceiver/WriteQueryReceiver.h create mode 100644 UserTools/WriteWorkers/README.md create mode 100644 UserTools/WriteWorkers/WriteWorkers.cpp create mode 
100644 UserTools/WriteWorkers/WriteWorkers.h diff --git a/CMakeLists.txt b/CMakeLists.txt index f2fdc57..def0e62 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,17 +1,17 @@ +#todo sym links not copy headers, use macro to seach for .so files in Usertools and add the libraries to libs list and symlink to libs folder + cmake_minimum_required (VERSION 2.6) project (ToolDAQApplicaiton) -set(TOOLDAQ_PATH "${PROJECT_SOURCE_DIR}/ToolDAQ") - -#include(${TOOLDAQ_PATH}/ToolDAQFramework/CMakeLists.include) +set(DEPENDENCIES_PATH "${PROJECT_SOURCE_DIR}/Dependencies") -set(ZMQ_INC "${TOOLDAQ_PATH}/zeromq-4.0.7/include/") -set(ZMQ_LIB_PATH "${TOOLDAQ_PATH}/zeromq-4.0.7/lib") +set(ZMQ_INC "${DEPENDENCIES_PATH}/zeromq-4.0.7/include/") +set(ZMQ_LIB_PATH "${DEPENDENCIES_PATH}/zeromq-4.0.7/lib") set(ZMQ_LIBS zmq) -set(BOOST_INC "${TOOLDAQ_PATH}/boost_1_66_0/install/include/") -set(BOOST_LIB_PATH "${TOOLDAQ_PATH}/boost_1_66_0/install/lib") +set(BOOST_INC "${DEPENDENCIES_PATH}/boost_1_66_0/install/include/") +set(BOOST_LIB_PATH "${DEPENDENCIES_PATH}/boost_1_66_0/install/lib") set(BOOST_LIBS boost_date_time boost_serialization boost_iostreams) set(DATAMODEL_INC "") @@ -22,21 +22,19 @@ set(MYTOOLS_INC "") set(MYTOOLS_LIB_PATH "") set(MYTOOLS_LIBS "") -#add_subdirectory(${TOOLDAQ_PATH}/ToolDAQFramework/ ./ToolDAQ/ToolDAQFramework/) +set(TOOLFRAMEWORK_INC "${DEPENDENCIES_PATH}/ToolFrameworkCore/include") +set(TOOLFRAMEWORK_LIBS_PATH "${DEPENDENCIES_PATH}/ToolFrameworkCore/lib") +set(TOOLFRAMEWORK_LIBS DataModelBase Logging Store ToolChain) -if(NOT(${PROJECT_SOURCE_DIR} STREQUAL ${PROJECT_BINARY_DIR})) -message("Not Building in source directory: Copying files") -FILE(COPY ${PROJECT_SOURCE_DIR}/configfiles DESTINATION ${PROJECT_BINARY_DIR}/) -FILE(COPY ${PROJECT_SOURCE_DIR}/UserTools DESTINATION ${PROJECT_BINARY_DIR}/) -FILE(COPY ${PROJECT_SOURCE_DIR}/DataModel DESTINATION ${PROJECT_BINARY_DIR}/) -FILE(COPY ${PROJECT_SOURCE_DIR}/Setup.sh DESTINATION ${PROJECT_BINARY_DIR}/) -endif() 
+set(TOOLDAQ_INC "${DEPENDENCIES_PATH}/ToolDAQFramework/include") +set(TOOLDAQ_LIBS_PATH "${DEPENDENCIES_PATH}/ToolDAQFramework/lib") +set(TOOLDAQ_LIBS DAQDataModelBase DAQLogging DAQStore ServiceDiscovery ToolDAQChain) -include_directories(${PROJECT_BINARY_DIR}/DataModel ${BOOST_INC} ${ZMQ_INC} ${DATAMODEL_INC} ${MYTOOLS_INC} ${TOOLDAQ_PATH}/ToolDAQFramework/include ${TOOLDAQ_PATH}/ToolDAQFramework/src/Tool ${TOOLDAQ_PATH}/ToolDAQFramework/src/ToolChain ${TOOLDAQ_PATH}/ToolDAQFramework/src/Logging ${TOOLDAQ_PATH}/ToolDAQFramework/src/Store ${TOOLDAQ_PATH}/ToolDAQFramework/src/ServiceDiscovery/) -link_directories("${PROJECT_BINARY_DIR}/lib" ${BOOST_LIB_PATH} ${ZMQ_LIB_PATH} ${DATAMODEL_LIB_PATH} ${MYTOOLS_LIB_PATH} ${TOOLDAQ_PATH}/ToolDAQFramework/lib) +include_directories (${DATAMODEL_INC} ${MYTOOLS_INC} ${TOOLFRAMEWORK_INC} ${TOOLDAQ_INC} ${ZMQ_INC} ${BOOST_INC}) +link_directories(${DATAMODEL_LIB_PATH} ${MYTOOLS_LIB_PATH} ${TOOLFRAMEWORK_LIBS_PATH} ${TOOLDAQ_LIBS_PATH} ${ZMQ_LIB_PATH} ${BOOST_LIB_PATH}) MACRO(HEADER_DIRECTORIES return_list) - FILE(GLOB_RECURSE new_list ${PROJECT_BINARY_DIR}/UserTools/*.h) + FILE(GLOB_RECURSE new_list ${PROJECT_SOURCE_DIR}/src/*.h ${PROJECT_SOURCE_DIR}/DataModel/*.h ${PROJECT_SOURCE_DIR}/UserTools/*.h ) FILE(COPY ${new_list} DESTINATION ${PROJECT_BINARY_DIR}/include) SET(dir_list "") FOREACH(file_path ${new_list}) @@ -47,37 +45,18 @@ MACRO(HEADER_DIRECTORIES return_list) SET(${return_list} ${dir_list}) ENDMACRO() +FILE(COPY ${PROJECT_SOURCE_DIR}/configfiles DESTINATION ${PROJECT_BINARY_DIR}/) + HEADER_DIRECTORIES(header_list) include_directories(${header_list}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib) -file(GLOB_RECURSE STORE_SRC RELATIVE ${CMAKE_SOURCE_DIR} "${TOOLDAQ_PATH}/ToolDAQFramework/src/Store/*.cpp") -add_library(Store SHARED ${STORE_SRC}) - -file(GLOB_RECURSE LOGGING_SRC RELATIVE ${CMAKE_SOURCE_DIR} "${TOOLDAQ_PATH}/ToolDAQFramework/src/Logging/*.cpp") -add_library(Logging SHARED ${LOGGING_SRC}) - 
-file(GLOB_RECURSE SERVICEDISCOVERY_SRC RELATIVE ${CMAKE_SOURCE_DIR} "${TOOLDAQ_PATH}/ToolDAQFramework/src/ServiceDiscovery/*.cpp") -add_library(ServiceDiscovery SHARED ${SERVICEDISCOVERY_SRC}) - -file(GLOB_RECURSE DATAMODEL_SRC RELATIVE ${CMAKE_BINARY_DIR} "DataModel/*.cpp") +file(GLOB_RECURSE DATAMODEL_SRC RELATIVE ${CMAKE_SOURCE_DIR} "DataModel/*.cpp") add_library(DataModel SHARED ${DATAMODEL_SRC}) -file(GLOB_RECURSE MYTOOLS_SRC RELATIVE ${CMAKE_BINARY_DIR} "UserTools/*.cpp") +file(GLOB_RECURSE MYTOOLS_SRC RELATIVE ${CMAKE_SOURCE_DIR} "UserTools/*.cpp") add_library(MyTools SHARED ${MYTOOLS_SRC}) -include_directories(${TOOLDAQ_PATH}/ToolDAQFramework/src/Logging) - -file(GLOB_RECURSE TOOLCHAIN_SRC RELATIVE ${CMAKE_SOURCE_DIR} "${TOOLDAQ_PATH}/ToolDAQFramework/src/ToolChain/*.cpp") -add_library(ToolChain SHARED ${TOOLCHAIN_SRC}) - - add_executable (main ${PROJECT_SOURCE_DIR}/src/main.cpp) -target_link_libraries (main Store Logging ToolChain ServiceDiscovery MyTools DataModel ${ZMQ_LIBS} ${BOOST_LIBS} ${DATAMODEL_LIBS} ${MYTOOLS_LIBS}) - -add_executable ( NodeDaemon ${TOOLDAQ_PATH}/ToolDAQFramework/src/NodeDaemon/NodeDaemon.cpp) -target_link_libraries (NodeDaemon Store ServiceDiscovery ${ZMQ_LIBS} ${BOOST_LIBS}) - -add_executable ( RemoteControl ${TOOLDAQ_PATH}/ToolDAQFramework/src/RemoteControl/RemoteControl.cpp) -target_link_libraries (RemoteControl Store ServiceDiscovery ${ZMQ_LIBS} ${BOOST_LIBS}) +target_link_libraries (main MyTools DataModel pthread ${DATAMODEL_LIBS} ${MYTOOLS_LIBS} ${TOOLFRAMEWORK_LIBS} ${TOOLDAQ_LIBS} ${ZMQ_LIBS} ${BOOST_LIBS}) diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index d46a6d4..06df1f9 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -1,59 +1,211 @@ #ifndef DATAMODEL_H #define DATAMODEL_H -#include -#include #include +#include +#include -//#include "TTree.h" - -#include "Store.h" -#include "BoostStore.h" -#include "Logging.h" +#include "DAQDataModelBase.h" #include "Utilities.h" - -#include +#include 
"Pool.h" +#include "JobQueue.h" /** * \class DataModel - * - * This class Is a transient data model class for your Tools within the ToolChain. If Tools need to comunicate they pass all data objects through the data model. There fore inter tool data objects should be deffined in this class. +* +* This class is a transient data model class for your Tools within the ToolChain. If Tools need to communicate they pass all data through the data model. Therefore inter-tool data variables should be defined in this class. * * * $Author: B.Richards $ - * $Date: 2019/05/26 18:34:00 $ - * Contact: b.richards@qmul.ac.uk - * - */ - -class DataModel { - - - public: - - DataModel(); ///< Simple constructor - - //TTree* GetTTree(std::string name); - //void AddTTree(std::string name,TTree *tree); - //void DeleteTTree(std::string name); - - Store vars; ///< This Store can be used for any variables. It is an inefficent ascii based storage - BoostStore CStore; ///< This is a more efficent binary BoostStore that can be used to store a dynamic set of inter Tool variables. - std::map Stores; ///< This is a map of named BooStore pointers which can be deffined to hold a nammed collection of any tipe of BoostStore. It is usefull to store data that needs subdividing into differnt stores. 
- - Logging *Log; ///< Log class pointer for use in Tools, it can be used to send messages which can have multiple error levels and destination end points - - zmq::context_t* context; ///< ZMQ contex used for producing zmq sockets for inter thread, process, or computer communication - - - private: - - - - //std::map m_trees; - - - + * $Date: 2019/05/26 $ + * Contact: benjamin.richards@warwick.ac.uk + * +*/ + +using namespace ToolFramework; + +class DataModel : public DAQDataModelBase { + + public: + DataModel(); ///< Simple constructor + + private: + + Utilities utils; ///< for thread management + + // Tools can add connections to this and the SocketManager + // will periodically invoke UpdateConnections to connect clients + std::map managed_sockets; + std::mutex managed_sockets_mtx; + + Pool job_pool; ///< pool of job structures to encapsulate jobs + JobQueue job_queue; ///< job queue to submit jobs to job manager + uint32_t thread_cap; ///< total number of thread cap to use in the program + std::atomic num_threads; ///< current number of threads + unsigned int worker_threads; + unsigned int max_worker_threads; + + /* ----------------------------------------- */ + /* MulticastReceiveSender */ + /* ----------------------------------------- */ + + // pool of string buffers: + // the receiver thread grabs a vector from the pool, fills it, + // the pushes the filled vector into the in_multicast_msg_queue + // and grabs a new vector from the pool + // FIXME base pool size on available RAM and struct size / make configurable + // Pool::Pool(bool in_manage=false, uint16_t period_ms=1000, size_t in_object_cap=1) + Pool> multicast_buffer_pool(true, 5000, 100); + + // batches of received messages, both logging and monitoring + // FIXME make these pairs or structs, container+mtx + // FIXME if instead of just a vector we used MulticastBatch, we could accumulate the length + // and then reserve in advance the length of the string needed for the combined message....? 
+ // XXX actually only if we tracked by topic, as one vector gets turned into 5 topical concat'd strings... + std::vector*> in_multicast_msg_queue; + std::mutex in_multicast_msg_queue_mtx; + + // Logging + // ------- + // Tracking + //{ TODO encapsulate in Tool monitoring struct? + std::atomic log_polls_failed; // error polling socket + std::atomic log_recv_fails; // error in recv_from + std::atomic logs_recvd; // messages successfully received + std::atomic log_in_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic log_out_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic log_thread_crashes; // restarts of logging thread (main thread found reader thread 'running' was false) + //} + // outgoing logging messages + std::vector out_log_msg_queue; + std::mutex out_log_msg_queue_mtx; + + // Monitoring + // ---------- + //{ + std::atomic mon_polls_failed; + std::atomic mon_recv_fails; + std::atomic mons_recvd; + std::atomic mon_in_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic mon_out_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic mon_thread_crashes; + //} + // outgoing monitoring messages + std::vector out_mon_msg_queue; + std::mutex out_mon_msg_queue_mtx; + + // pool is shared between read and write query receivers + Pool querybatch_pool(true, 5000, 100); + + /* ----------------------------------------- */ + /* PubReceiver */ + /* ----------------------------------------- */ + // TODO Tool monitoring struct? 
+ std::atomic pub_rcv_thread_crashes; + std::vector write_msg_queue; + std::mutex write_msg_queue_mtx; + std::atomic write_polls_failed; + std::atomic write_msgs_rcvd; + std::atomic write_rcv_fails; + std::atomic write_bad_msgs; + std::atomic write_buffer_transfers; + + //} + + // TODO move to struct + { + zmq::socket_t* pub_socket; + std::mutex pub_socket_mtx; // socket needed by Reader and SocketManager (for finding new clients) + } + + /* ----------------------------------------- */ + /* ReadReply */ + /* ----------------------------------------- */ + // TODO Tool monitoring struct? + std::atomic read_rcv_thread_crashes; + std::vector read_msg_queue; + std::mutex read_msg_queue_mtx; + std::atomic readrep_polls_failed; + std::atomic readrep_msgs_rcvd; + std::atomic readrep_rcv_fails; + std::atomic readrep_bad_msgs; + std::atomic readrep_reps_sent; + std::atomic readrep_rep_send_fails; + std::atomic readrep_in_buffer_transfers; + std::atomic readrep_out_buffer_transfers; + + // TODO move to struct + { + zmq::socket_t* read_socket; + std::mutex read_socket_mtx; // socket needed by Reader and SocketManager (for finding new clients) + } + + std::vector query_replies; + std::mutex query_replies_mtx; + + /* ----------------------------------------- */ + /* MulticastWorkers */ + /* ----------------------------------------- */ + // each element is a batch of JSON that can be inserted by the DatabaseWorkers + // FIXME these strings represent batches of multicast messages, so could be very large. + // each push_back could require reallocation, which could involve moving a lot of very large message buffers + // FIXME make these pointers, put the strings (maybe make a struct? maybe just a typedef/alias?) in a pool? 
+ std::vector log_query_queue; + std::mutex log_query_queue_mtx; + + std::vector mon_query_queue; + std::mutex mon_query_queue_mtx; + + std::vector rootplot_query_queue; + std::mutex rootplot_query_queue_mtx; + + std::vector plotlyplot_query_queue; + std::mutex plotlyplot_query_queue_mtx; + + std::atomic multicast_job_distributor_thread_crashes; + std::atomic multicast_worker_job_fails; + std::atomic multicast_worker_job_successes; + + /* ----------------------------------------- */ + /* WriteWorkers */ + /* ----------------------------------------- */ + std::atomic write_job_distributor_thread_crashes; + + std::vector write_query_queue; + std::mutex write_query_queue_mtx; + + std::atomic write_worker_job_fails; + std::atomic write_worker_job_successes; + + /* ----------------------------------------- */ + /* DatabaseWorkers */ + /* ----------------------------------------- */ + + std::atomic database_job_distributor_thread_crashes; + std::vector read_replies; // output, awaiting for result conversion + std::mutex read_replies_mtx; + + std::atomic db_worker_job_successes; // FIXME add for others + std::atomic db_worker_job_fails; + + /* ----------------------------------------- */ + /* ResultWorkers */ + /* ----------------------------------------- */ + std::atomic result_job_distributor_thread_crashes; + + std::atomic result_worker_job_fails; + std::atomic result_worker_job_successes; + + /* ----------------------------------------- */ + /* Monitoring */ + /* ----------------------------------------- */ + std::atomic monitoring_thread_crashes; + + /* ----------------------------------------- */ + /* SocketManager */ + /* ----------------------------------------- */ + std::atomic socket_manager_thread_crashes; + }; diff --git a/DataModel/ManagedSocket.h b/DataModel/ManagedSocket.h new file mode 100644 index 0000000..359a77e --- /dev/null +++ b/DataModel/ManagedSocket.h @@ -0,0 +1,13 @@ +#ifndef ManagedSocket_H +#define ManagedSocket_H + +struct ManagedSocket { + 
std::mutex socket_mtx; + zmq::socket_t* socket=nullptr; + std::string service_name; + std::string port; + std::string port_name; + std::map connections; +}; + +#endif diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h new file mode 100644 index 0000000..396211a --- /dev/null +++ b/DataModel/QueryBatch.h @@ -0,0 +1,58 @@ +#ifndef QUERY_BATCH_H +#define QUERY_BATCH_H + +struct QueryBatch { + // fill / read by receive/senders + QueryBatch(size_t prealloc_size){ + queries.reserve(prealloc_size); + } + std::vector queries; + + // prepare for batch insertion by workers + std::string alarm_buffer; + std::string devconfig_buffer; + std::string runconfig_buffer; + std::string calibration_buffer; + std::string plotlyplot_buffer; + std::string rooplot_buffer; + void reset(){ + alarm_buffer = "["; + devconfig_buffer = "["; + runconfig_buffer = "["; + calibration_buffer = "["; + plotlyplot_buffer = "["; + rooplot_buffer = "["; + + alarm_batch_status = false; + + // the presence of returned version numbers is indication that these batch insertions worked + devconfig_version_nums.clear(); + runconfig_version_nums.clear(); + calibration_version_nums.clear(); + plotlyplot_version_nums.clear(); + rootplot_version_nums.clear(); + + } + + // set by database workers for batch submissions + bool alarm_batch_success; + + // FIXME check type returned from pqxx + std::vector devconfig_version_nums; + std::vector runconfig_version_nums; + std::vector calibration_version_nums; + std::vector plotlyplot_version_nums; + std::vector rootplot_version_nums; + +// // convert to zmq message on return path by workers +// void setsuccess(uint32_t succeeded){ +// for(ZmqQuery& q : queries) q.setsuccess(succeeded); +// } +// void setversionnums(){ +// for(size_t i=0; i + +struct ZmqQuery { + + ZmqQuery(){}; + ~ZmqQuery(){}; + + // no copy constructor + ZmqQuery(const ZmqQuery&) = delete; + // no copy assignment operator + ZmqQuery& operator=(const ZmqQuery&) = delete; + + // allow move 
constructor + ZmqQuery(ZmqQuery&& c) = default; + // allow move assignment operator + ZmqQuery& operator=(ZmqQuery&& c) = default; + + // 4 parts for receiving, for sending 3+ parts + std::vector parts(4); + size_t size() const { + return parts.size(); + } + + // pub socket: topic, client, msgnum, query + // router socket: client, topic, msgnum, query + // replies: client, msgnum, success, results (if present)... + + zmq::message_t& operator[](int i){ + return parts[i]; + } + // received and returned + std::string_view client_id(){ + return std::string_view{parts[0].data(),parts[0].data().size()}; + } + uint32_t msg_id(){ + return *reinterpret_cast(parts[1].data()); + } + // received only + std::string_view topic(){ + return std::string_view{parts[2].data(),parts[2].data().size()}; + } + std::string_view msg(){ + return std::string_view{parts[3].data(),parts[3].data().size()}; + } + + // for setting success + void setsuccess(uint32_t succeeded){ + //parts[2]=new(parts[2]) zmq::message_t(sizeof(uint32_t)); // uhh, is there a better way to call zmq_msg_init_size? + zmq_msg_init_size(&parts[2],sizeof(uint32_t)); // this is from underlying c api... FIXME? + memcpy((void*)parts[2].data(),&succeeded,sizeof(uint32_t)); + return; + } + + // for read queries, returned directly from pqxx, decoded later + pqxx::result result; + + // for setting responses of read queries + void setresponserows(size_t n_rows){ + parts.resize(3+n_rows); + return; + } + void setresponse(size_t row_num, std::string_view row){ + zmq_msg_init_size(&parts[row_num+3],row.size()); // this is from underlying c api... FIXME? 
+ memcpy((void*)parts[row_num+3].data(),row.data(),row.size()); + return; + } +} + + +#endif diff --git a/DataModel/query_typedefs.h b/DataModel/query_typedefs.h new file mode 100644 index 0000000..5117b41 --- /dev/null +++ b/DataModel/query_typedefs.h @@ -0,0 +1,9 @@ +#ifndef QUERY_TYPES_H +#define QUERY_TYPES_H + +// used by MulticastWorkers and DatabaseWorkers +// only write query topics +enum class query_topic char { alarm='A', dev_config='D', run_config='R', calibration='C', logging='L', monitoring='M', rootplot='T', plotlyplot='P', generic='Q' }; + +#endif + diff --git a/Makefile b/Makefile index f65bb15..8968bed 100644 --- a/Makefile +++ b/Makefile @@ -1,126 +1,100 @@ -ToolDAQPath=ToolDAQ +Dependencies=Dependencies +ToolFrameworkCore=$(Dependencies)/ToolFrameworkCore +ToolDAQFramework=$(Dependencies)/ToolDAQFramework +SOURCEDIR=`pwd` + +CXXFLAGS= -fPIC -std=c++11 -Wno-comment # -Wpedantic -Wall -Wno-unused -Wextra -Wcast-align -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-declarations -Wmissing-include-dirs -Wnoexcept -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-conversion -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wswitch-default -Wundef #-Werror -Wold-style-cast -CXXFLAGS= -fPIC -Wpedantic -O3 # -g -lSegFault -rdynamic -DDEBUG -# -Wl,--no-as-needed ifeq ($(MAKECMDGOALS),debug) -CXXFLAGS+= -O1 -g -lSegFault -rdynamic -DDEBUG +CXXFLAGS+= -O0 -g -lSegFault -rdynamic -DDEBUG +else +CXXFLAGS+= -O3 endif -ZMQLib= -L $(ToolDAQPath)/zeromq-4.0.7/lib -lzmq -ZMQInclude= -I $(ToolDAQPath)/zeromq-4.0.7/include/ +DataModelInclude = +DataModelLib = -BoostLib= -L $(ToolDAQPath)/boost_1_66_0/install/lib -lboost_date_time -lboost_serialization -lboost_iostreams -BoostInclude= -I $(ToolDAQPath)/boost_1_66_0/install/include +MyToolsInclude = +MyToolsLib = -DataModelInclude = -DataModelLib = +ZMQLib= -L $(Dependencies)/zeromq-4.0.7/lib -lzmq +ZMQInclude= -I $(Dependencies)/zeromq-4.0.7/include/ 
-MyToolsInclude = -MyToolsLib = +BoostLib= -L $(Dependencies)/boost_1_66_0/install/lib -lboost_date_time -lboost_serialization -lboost_iostreams +BoostInclude= -I $(Dependencies)/boost_1_66_0/install/include + +Includes=-I $(ToolFrameworkCore)/include/ -I $(ToolDAQFramework)/include/ -I $(SOURCEDIR)/include/ $(ZMQInclude) $(BoostInclude) +ToolLibraries = $(patsubst %, lib/%, $(filter lib%, $(subst /, , $(wildcard UserTools/*/*.so)))) +LIBRARIES=lib/libDataModel.so lib/libMyTools.so $(ToolLibraries) +DataModelHEADERS:=$(patsubst %.h, include/%.h, $(filter %.h, $(subst /, ,$(wildcard DataModel/*.h)))) +MyToolHEADERS:=$(patsubst %.h, include/%.h, $(filter %.h, $(subst /, ,$(wildcard UserTools/*/*.h) $(wildcard UserTools/*.h)))) +ToolLibs = $(patsubst %.so, %, $(patsubst lib%, -l%,$(filter lib%, $(subst /, , $(wildcard UserTools/*/*.so))))) +AlreadyCompiled = $(wildcard UserTools/$(filter-out %.so UserTools , $(subst /, ,$(wildcard UserTools/*/*.so)))/*.cpp) +SOURCEFILES:=$(patsubst %.cpp, %.o, $(filter-out $(AlreadyCompiled), $(wildcard src/*.cpp) $(wildcard UserTools/*/*.cpp) $(wildcard DataModel/*.cpp))) +Libs=-L $(SOURCEDIR)/lib/ -lDataModel -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore -L $(ToolFrameworkCore)/lib/ -lToolChain -lMyTools -lDataModelBase -lLogging -lStore -lpthread $(ToolLibs) -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore $(ZMQLib) $(BoostLib) + + +#.SECONDARY: $(%.o) + +all: $(DataModelHEADERS) $(MyToolHEADERS) $(SOURCEFILES) $(LIBRARIES) main NodeDaemon RemoteControl debug: all -all: lib/libStore.so lib/libLogging.so lib/libDataModel.so include/Tool.h lib/libMyTools.so lib/libServiceDiscovery.so lib/libToolChain.so main RemoteControl NodeDaemon +main: src/main.o $(LIBRARIES) $(DataModelHEADERS) $(MyToolHEADERS) | $(SOURCEFILES) + @echo -e "\e[38;5;11m\n*************** Making " $@ " ****************\e[0m" + g++ $(CXXFLAGS) $< -o $@ 
$(Includes) $(Libs) $(DataModelInclude) $(DataModelLib) $(MyToolsInclude) $(MyToolsLib) + +include/%.h: + @echo -e "\e[38;5;87m\n*************** sym linking headers ****************\e[0m" + ln -s `pwd`/$(filter %$(strip $(patsubst include/%.h, /%.h, $@)), $(wildcard DataModel/*.h) $(wildcard UserTools/*/*.h) $(wildcard UserTools/*.h)) $@ + +src/%.o : src/%.cpp + @echo -e "\e[38;5;214m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) -c $< -o $@ $(Includes) -main: src/main.cpp | lib/libMyTools.so lib/libStore.so lib/libLogging.so lib/libToolChain.so lib/libDataModel.so lib/libServiceDiscovery.so - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - g++ $(CXXFLAGS) src/main.cpp -o main -I include -L lib -lStore -lMyTools -lToolChain -lDataModel -lLogging -lServiceDiscovery -lpthread $(DataModelInclude) $(DataModelLib) $(MyToolsInclude) $(MyToolsLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) +UserTools/Factory/Factory.o : UserTools/Factory/Factory.cpp $(DataModelHEADERS) $(MyToolHEADERS) + @echo -e "\e[38;5;214m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) -c $< -o $@ $(Includes) $(DataModelInclude) $(ToolsInclude) +UserTools/%.o : UserTools/%.cpp $(DataModelHEADERS) UserTools/%.h + @echo -e "\e[38;5;214m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) -c $< -o $@ $(Includes) $(DataModelInclude) $(ToolsInclude) -lib/libStore.so: $(ToolDAQPath)/ToolDAQFramework/src/Store/* - cd $(ToolDAQPath)/ToolDAQFramework && $(MAKE) lib/libStore.so - @echo -e "\e[38;5;118m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/src/Store/*.h include/ - cp $(ToolDAQPath)/ToolDAQFramework/lib/libStore.so lib/ - #g++ -g -O2 -fPIC -shared -I include $(ToolDAQPath)/ToolDAQFramework/src/Store/*.cpp -o lib/libStore.so $(BoostLib) $(BoostInclude) +DataModel/%.o : DataModel/%.cpp DataModel/%.h $(DataModelHEADERS) + @echo -e 
"\e[38;5;214m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) -c $< -o $@ $(Includes) $(DataModelInclude) +lib/libDataModel.so: $(patsubst %.cpp, %.o , $(wildcard DataModel/*.cpp)) | $(DataModelHEADERS) + @echo -e "\e[38;5;201m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) --shared $^ -o $@ $(Includes) $(DataModelInclude) -include/Tool.h: $(ToolDAQPath)/ToolDAQFramework/src/Tool/Tool.h - @echo -e "\e[38;5;118m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/src/Tool/Tool.h include/ - cp UserTools/*.h include/ - cp UserTools/*/*.h include/ - cp DataModel/*.h include/ +lib/libMyTools.so: $(patsubst %.cpp, %.o , $(filter-out $(AlreadyCompiled), $(wildcard UserTools/*/*.cpp))) | $(DataModelHEADERS) $(MyToolHEADERS) + @echo -e "\e[38;5;201m\n*************** Making " $@ "****************\e[0m" + g++ $(CXXFLAGS) --shared $^ -o $@ $(Includes) $(DataModelInclude) $(MyToolsInclude) +lib/%.so: + @echo -e "\e[38;5;87m\n*************** sym linking Tool libs ****************\e[0m" + ln -s `pwd`/$(filter %$(strip $(patsubst lib/%.so, /%.so ,$@)), $(wildcard UserTools/*/*.so)) $@ -lib/libToolChain.so: $(ToolDAQPath)/ToolDAQFramework/src/ToolChain/* | lib/libLogging.so lib/libStore.so lib/libMyTools.so lib/libServiceDiscovery.so lib/libLogging.so lib/libDataModel.so - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/UserTools/Factory/*.h include/ - cp $(ToolDAQPath)/ToolDAQFramework/src/ToolChain/*.h include/ - g++ $(CXXFLAGS) -shared $(ToolDAQPath)/ToolDAQFramework/src/ToolChain/ToolChain.cpp -I include -lpthread -L lib -lStore -lDataModel -lServiceDiscovery -lLogging -lMyTools -o lib/libToolChain.so $(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(MyToolsInclude) $(BoostLib) $(BoostInclude) +NodeDaemon: $(ToolDAQFramework)/NodeDaemon + @echo -e "\e[38;5;87m\n*************** sym linking " $@ " ****************\e[0m" 
+ ln -s $(ToolDAQFramework)/NodeDaemon ./ +RemoteControl: $(ToolDAQFramework)/RemoteControl + @echo -e "\e[38;5;87m\n*************** sym linking " $@ " ****************\e[0m" + ln -s $(ToolDAQFramework)/RemoteControl ./ -clean: +clean: @echo -e "\e[38;5;201m\n*************** Cleaning up ****************\e[0m" + rm -f */*/*.o + rm -f */*.o rm -f include/*.h rm -f lib/*.so - rm -f main - rm -f RemoteControl - rm -f NodeDaemon - rm -f UserTools/*/*.o - rm -f DataModel/*.o - -lib/libDataModel.so: DataModel/* lib/libLogging.so lib/libStore.so $(patsubst DataModel/%.cpp, DataModel/%.o, $(wildcard DataModel/*.cpp)) - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - cp DataModel/*.h include/ - #g++ -g -O2 -fPIC -shared DataModel/*.cpp -I include -L lib -lStore -lLogging -o lib/libDataModel.so $(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - g++ $(CXXFLAGS) -shared DataModel/*.o -I include -L lib -lStore -lLogging -o lib/libDataModel.so $(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - -lib/libMyTools.so: UserTools/*/* UserTools/* include/Tool.h lib/libLogging.so lib/libStore.so $(patsubst UserTools/%.cpp, UserTools/%.o, $(wildcard UserTools/*/*.cpp)) |lib/libDataModel.so - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - cp UserTools/*/*.h include/ - cp UserTools/*.h include/ - #g++ -g -O2 -fPIC -shared UserTools/Factory/Factory.cpp -I include -L lib -lStore -lDataModel -lLogging -o lib/libMyTools.so $(MyToolsInclude) $(MyToolsLib) $(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - g++ $(CXXFLAGS) -shared UserTools/*/*.o -I include -L lib -lStore -lDataModel -lLogging -o lib/libMyTools.so $(MyToolsInclude) $(DataModelInclude) $(MyToolsLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - -RemoteControl: - cd $(ToolDAQPath)/ToolDAQFramework/ && $(MAKE) RemoteControl - @echo -e 
"\e[38;5;118m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/RemoteControl ./ - -NodeDaemon: - cd $(ToolDAQPath)/ToolDAQFramework/ && $(MAKE) NodeDaemon - @echo -e "\e[38;5;226m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/NodeDaemon ./ - -lib/libServiceDiscovery.so: $(ToolDAQPath)/ToolDAQFramework/src/ServiceDiscovery/* | lib/libStore.so - cd $(ToolDAQPath)/ToolDAQFramework && $(MAKE) lib/libServiceDiscovery.so - @echo -e "\e[38;5;118m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/src/ServiceDiscovery/ServiceDiscovery.h include/ - cp $(ToolDAQPath)/ToolDAQFramework/lib/libServiceDiscovery.so lib/ - #g++ -shared -fPIC -I include $(ToolDAQPath)/ToolDAQFramework/src/ServiceDiscovery/ServiceDiscovery.cpp -o lib/libServiceDiscovery.so -L lib/ -lStore $(ZMQInclude) $(ZMQLib) $(BoostLib) $(BoostInclude) - -lib/libLogging.so: $(ToolDAQPath)/ToolDAQFramework/src/Logging/* | lib/libStore.so - cd $(ToolDAQPath)/ToolDAQFramework && $(MAKE) lib/libLogging.so - @echo -e "\e[38;5;118m\n*************** Copying " $@ "****************\e[0m" - cp $(ToolDAQPath)/ToolDAQFramework/src/Logging/Logging.h include/ - cp $(ToolDAQPath)/ToolDAQFramework/lib/libLogging.so lib/ - #g++ -shared -fPIC -I include $(ToolDAQPath)/ToolDAQFramework/src/Logging/Logging.cpp -o lib/libLogging.so -L lib/ -lStore $(ZMQInclude) $(ZMQLib) $(BoostLib) $(BoostInclude) - -update: - @echo -e "\e[38;5;51m\n*************** Updating ****************\e[0m" - cd $(ToolDAQPath)/ToolDAQFramework; git pull - cd $(ToolDAQPath)/zeromq-4.0.7; git pull - git pull - - -UserTools/%.o: UserTools/%.cpp lib/libStore.so include/Tool.h lib/libLogging.so lib/libDataModel.so - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - cp $(shell dirname $<)/*.h include - -g++ -c $(CXXFLAGS) -o $@ $< -I include -L lib -lStore -lDataModel -lLogging $(MyToolsInclude) $(MyToolsLib) 
$(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - -target: remove $(patsubst %.cpp, %.o, $(wildcard UserTools/$(TOOL)/*.cpp)) - -remove: - echo -e "removing" - -rm UserTools/$(TOOL)/*.o - -DataModel/%.o: DataModel/%.cpp lib/libLogging.so lib/libStore.so - @echo -e "\e[38;5;226m\n*************** Making " $@ "****************\e[0m" - cp $(shell dirname $<)/*.h include - -g++ -c $(CXXFLAGS) -o $@ $< -I include -L lib -lStore -lLogging $(DataModelInclude) $(DataModelLib) $(ZMQLib) $(ZMQInclude) $(BoostLib) $(BoostInclude) - + rm -rf main + rm -rf NodeDaemon + rm -rf RemoteControl Docs: doxygen Doxyfile + diff --git a/Setup.sh b/Setup.sh index f520e30..563712e 100755 --- a/Setup.sh +++ b/Setup.sh @@ -1,11 +1,11 @@ #!/bin/bash +export PS1='${debian_chroot:+($debian_chroot)}\[\033[35;2;1m\]\u@\h\[\033[00m\]:\[\033[00;36m\]\w\[\033[00m\]\$ ' #Application path location of applicaiton - -ToolDAQapp=`pwd` +Dependencies=/opt #source ${ToolDAQapp}/ToolDAQ/root/bin/thisroot.sh -export LD_LIBRARY_PATH=`pwd`/lib:${ToolDAQapp}/lib:${ToolDAQapp}/ToolDAQ/zeromq-4.0.7/lib:${ToolDAQapp}/ToolDAQ/boost_1_66_0/install/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=`pwd`/lib:${Dependencies}/zeromq-4.0.7/lib:${Dependencies}/boost_1_66_0/install/lib:${Dependencies}/ToolFrameworkCore/lib:${Dependencies}/ToolDAQFramework/lib:$LD_LIBRARY_PATH export SEGFAULT_SIGNALS="all" diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp new file mode 100644 index 0000000..14ad3c1 --- /dev/null +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -0,0 +1,480 @@ +#include "DatabaseWorkers.h" +#include "GenericFunctions.h" + +DatabaseWorkers::DatabaseWorkers():Tool(){} + + +bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) 
m_verbose=1; + + // ########################################################################## + // default initialize variables + // ########################################################################## + std::string dbhostname = "/tmp"; // '/tmp' = local unix socket + std::string dbhostaddr = ""; // fallback if hostname is empty, an ip address + int dbport = 5432; // database port + std::string dbuser = ""; // database user to connect as. defaults to PGUSER env var if empty. + std::string dbpasswd = ""; // database password. defaults to PGPASS or PGPASSFILE if not given. + + // on authentication: we may consider using 'ident', which will permit the + // user to connect to the database as the postgres user with name matching + // their OS username, and/or the database user mapped to their username + // with the pg_ident.conf file in postgres database. in such a case dbuser and dbpasswd + // should be left empty + + // ########################################################################## + // # Update with user-specified values. + // ########################################################################## + // TODO make these config var keys specific for each db. 
+ m_variables.Get("hostname",dbhostname); + m_variables.Get("hostaddr",dbhostaddr); + m_variables.Get("dbname",dbname); + m_variables.Get("port",dbport); + m_variables.Get("user",dbuser); + m_variables.Get("passwd",dbpasswd); + + // ########################################################################## + // # Open connection + // ########################################################################## + + // pass connection details to the postgres interface class + if(dbhostname!="") tmp<<" host="< Log + return false; + } + // closes connection here on destruction + } catch (const pqxx::broken_connection &e){ + // as usual the doxygen sucks, but it seems this doesn't provide + // any further methods to obtain information about the failure mode, + // so probably not useful to catch this explicitly. + std::cerr << e.what() << std::endl; // FIXME cerr -> Log + return false; + } + catch (std::exception const &e){ + std::cerr << e.what() << std::endl; // FIXME cerr -> Log + return false; + } + + // we *do* need a unique worker pool here because these workers + // maintain a connection to the database, so are a 'limited resource' + job_manager = new WorkerPoolManager(database_jobqueue, &max_workers, &(m_data->thread_cap), &(m_data->num_threads), nullptr, true); + + thread_args.m_data = m_data; + thread_args.job_queue = &database_jobqueue; + m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args); + m_data->num_threads++; + + return true; +} + + +bool DatabaseWorkers::Execute(){ + + // the main thread is going to lock the datamodel vector of queries + // grab a bunch of entries, and spin off a job for each batch of queries + // (possibly doing this several times to spin off multiple jobs) + + // FIXME ok but actually this kills all our jobs, not just our job distributor + // so we don't want to do that.
+ + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++m_data->database_job_distributor_thread_crashes; + } + + return true; +} + + +bool DatabaseWorkers::Finalise(){ + + // signal job distributor thread to stop + Log(m_tool_name+": Joining job distributor thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + // deleting the worker pool manager will kill all the worker threads + Log(m_tool_name+": Joining database worker thread pool",v_warning); + delete job_manager; + job_manager = nullptr; + m_data->num_threads--; + Log(m_tool_name+": Finished",v_warning); + + return true; +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void DatabaseWorkers::Thread(Thread_args* arg){ + + m_args = dynamic_cast(arg); + + // add a new Job to the job queue to process this data + Job* the_job = m_data->job_pool.GetNew(&m_data->job_pool, "database_worker"); + if(the_job->data == nullptr){ + // on first creation of the job, make it a JobStruct to encapsulate its data + // N.B. Pool::GetNew will only invoke the constructor if this is a new instance, + // (not if it's been used before and then returned to the pool) + // so don't pass job-specific variables to the constructor + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); + } else { + // FIXME error + std::cerr<<"database_worker Job with non-null data pointer!"<func = DatabaseJob; + the_job->fail_func = DatabaseJobFail; + + DatabaseJobStruct* job_data = dynamic_cast(the_job->data); + job_data->connection_string = connection_string; + + // XXX ok we have flexibility here on how much we want each worker to grab + // the more we do in one transaction (one job) the better throughput...
+ + // but with possibly greater latency on replies + + // grab logging queries + std::unique_lock locker(m_args->m_data->log_query_queue_mtx); + if(!m_args->m_data->log_query_queue.empty()){ + std::swap(m_args->m_data->log_query_queue, job_data->logging_queue); + } + + // grab monitoring queries + locker = std::unique_lock(m_args->m_data->mon_query_queue_mtx); + if(!m_args->m_data->mon_query_queue.empty()){ + std::swap(m_args->m_data->mon_query_queue, job_data->monitoring_queue); + } + + // grab write queries + locker = std::unique_lock(m_args->m_data->write_query_queue_mtx); + if(!m_args->m_data->write_query_queue.empty()){ + std::swap(m_args->m_data->write_query_queue, job_data->write_queue); + } + + // grab read queries + locker = std::unique_lock(m_args->m_data->read_msg_queue_mtx); + if(!m_args->m_data->read_msg_queue.empty()){ + std::swap(m_args->m_data->read_msg_queue, job_data->read_queue); + } + + // if they go over multicast + // grab rootplot queries + locker = std::unique_lock(m_args->m_data->rootplot_query_queue_mtx); + if(!m_args->m_data->rootplot_query_queue.empty()){ + std::swap(m_args->m_data->rootplot_query_queue, job_data->rootplot_queue); + } + + // grab plotlyplot queries + locker = std::unique_lock(m_args->m_data->plotlyplot_query_queue_mtx); + if(!m_args->m_data->plotlyplot_query_queue.empty()){ + std::swap(m_args->m_data->plotlyplot_query_queue, job_data->plotlyplot_queue); + } + + locker.unlock(); + + /*ok =*/ m_args->job_queue->AddJob(the_job); // just checks if you've defined func and first_vals = true; + +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void DatabaseWorkers::DatabaseJobFail(void*& arg){ + + // safety check in case the job somehow fails after returning its args to the pool + if(arg==nullptr){ + return; // FIXME log this occurrence? + } + + // FIXME do something here + // if there were preceding messages that were succesfully added + // we could try to insert the current buffers so that those get processed.
+ // but we don't know where we failed, so that could be risky if the buffer is corrupt? + // we could keep track of where we were in m_args and: + // 1. log the specific message we were trying to process when the job failed + // 2. submit the data we already have + // 3. make a new job for the remaining data + // this probably seems better, but be careful not to get stuck in a fail loop + // if the problem isn't the query + + // at minimum we need to pass our vector back somewhere for the failures + // to be reported to the clients + //m_args->m_data->query_buffer_pool.Add(m_args->msg_buffer); << FIXME not back to the pool but reply queue + + //query.result.clear(); // to clear/release bad results... + // ideally we want to pass back an error or what happened to the client? + + WriteJobStruct* m_args=reinterpret_cast(arg); + ++(*m_args->m_data->db_worker_job_fails); + + // return our job args to the pool + m_args->m_pool.Add(m_args); + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void DatabaseWorkers::DatabaseJob(void*& arg){ + + DatabaseJobStruct* m_args = dynamic_cast(arg); + + // the worker will need a connection to the database + thread_local pqxx::connection* conn; + if(conn==nullptr){ + conn = new pqxx::connection(m_args->connection_string); + if(!conn){ + Log("Failed to open connection to database for worker thread!",v_error); // FIXME logging + // FIXME terminate this worker... m_args->running=false? + return; + } else { + // set up prepared statements. 
These are, sadly, a property of the connection + // logging insert + conn->prepare("logging_insert", "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, severity int, message text)"); + // monitoring insert + conn->prepare("monitoring_insert", "INSERT INTO monitoring ( time, device, subject, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, subject text, data jsonb)"); + // alarms insert + conn->prepare("alarms_insert", "INSERT INTO alarms ( time, device, level, alarm ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, level int, alarm text)"); + // rootplot insert + conn->prepare("rootplots_insert", "INSERT INTO rootplots ( time, name, data, draw_options ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, draw_options text)"); + // plotlyplot insert + conn->prepare("plotlyplots_insert", "INSERT INTO plotlyplots ( time, name, data, layout ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, layout jsonb)"); + // calibration insert + conn->prepare("calibration_insert", "INSERT INTO calibration ( time, name, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, description text, data jsonb)"); + // device config insert + conn->prepare("device_config_insert", "INSERT INTO device_config ( time, device, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, author text, description text, data jsonb)"); + // run config insert + conn->prepare("run_config_insert", "INSERT INTO run_config ( time, name, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, author text, description text, data jsonb)"); + } + } + + // we also use a single transaction for all queries, so open that now + pqxx::work tx(*conn); // aka pqxx::transaction<> + + // insert new logging statements + try { + tx.exec(pqxx::prepped{"logging_insert"}, pqxx::params{m_args->logging_queue}); + ++(*m_args->m_data->n_logging_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_logging_submissions_failed); + // FIXME log the error here + } + + // insert new monitoring statements + try { + tx.exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{m_args->monitoring_queue}); + ++(*m_args->m_data->n_monitoring_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_monitoring_submissions_failed); + // FIXME log the error here + } + + // insert new multicast rootplot statements + try { + tx.exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{m_args->rootplot_queue}); + ++(*m_args->m_data->n_rootplot_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_rootplot_submissions_failed); + // FIXME log the error here + } + + // insert new multicast plotlyplot statements + try { + tx.exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{m_args->plotlyplot_queue}); + ++(*m_args->m_data->n_plotlyplot_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_plotlyplot_submissions_failed); + // FIXME log the error here + } + + // write queries + for(QueryBatch* batch : m_args->write_queue){ + // the batch gets split up by WriteWorkers into a buffer for each type of write query + + // alarm insertions return nothing, just catch errors + try { + tx.exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); + batch->alarm_batch_success = true; + ++(*m_args->m_data->n_alarm_submissions); + } catch (std::exception& e){ + batch->alarm_batch_success = false; + ++(*m_args->m_data->n_alarm_submissions_failed); + // FIXME log the error here + } + + // the remaining insertions return the new version number + // `pqxx::transaction_base::for_query` runs a query and invokes a callable for each result row + // we use this to collect the returned version
numbers into a vector + // N.B. `pqxx::transaction_base::for_stream` is an alternative that is faster for large results + // but slower for small results. TODO check whether ours count as 'large' .. probably not. + + // device config insertions + try { + tx.for_query(pqxx::prepped{"device_config_insert"}, + [&batch](int32_t new_version_num){ + batch->devconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->devconfig_buffer}); + ++(*m_args->m_data->n_devconfig_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_devconfig_submissions_failed); + // FIXME log the error here + } + + // run config insertions + try { + tx.for_query(pqxx::prepped{"run_config_insert"}, + [&batch](int32_t new_version_num){ + batch->runconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->runconfig_buffer}); + ++(*m_args->m_data->n_runconfig_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_runconfig_submissions_failed); + // FIXME log the error here + } + + // calibration data insertions + try { + tx.for_query(pqxx::prepped{"calibration_insert"}, + [&batch](int32_t new_version_num){ + batch->calibration_version_nums.push_back(new_version_num); + }, pqxx::params{batch->calibration_buffer}); + ++(*m_args->m_data->n_calibration_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_calibration_submissions_failed); + // FIXME log the error here + } + + // rootplot insertions + try { + tx.for_query(pqxx::prepped{"rootplots_insert"}, + [&batch](int32_t new_version_num){ + batch->rootplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->rootplot_buffer}); + ++(*m_args->m_data->n_rootplot_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_rootplot_submissions_failed); + // FIXME log the error here + } + + // plotlyplot insertions + try { + tx.for_query(pqxx::prepped{"plotlyplots_insert"}, + [&batch](int32_t new_version_num){ + 
batch->plotlyplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->plotlyplot_buffer}); + ++(*m_args->m_data->n_plotlyplot_submissions); + } catch (std::exception& e){ + ++(*m_args->m_data->n_plotlyplot_submissions_failed); + // FIXME log the error here + } + } + + // read queries + // since these don't actually modify the database, if any query or the final 'commit' fails, + // then preceding queries should already have their results + // (and FIXME check this, maybe later retrieve calls still work too?) + // if so, we can: + // 1. move them to the start of the job, so we can salvage what ran successfully? + // the trouble with that is these queries are "less reliable" since they are not necessarily formed by us. + // perhaps we could separate out `Query` topic jobs to run at the end after the `commit` call? XXX probably this! + // 2. move these to a separate worker job? + + // each batch contains a vector of queries, but unlike inserts, we can't batch these FIXME i think? + // for giggles, we'll pipeline them. This may even improve performance. + pqxx::pipeline px(tx); + for(QueryBatch* batch : m_args->read_queue){ + // it may be best to set the pipeline to retain ~the number of queries we're going to insert, + // so that it runs them all in one. TODO or maybe do it in two halves? + px.retain(batch->queries.size()); + + // insert all the queries + for(ZmqQuery& query : batch->queries){ + px.insert(query.msg); // returns a unique query_id (aka long) + } + + // and then get the results + for(ZmqQuery& query : batch->queries){ + try { + query.result.clear(); // should be redundant...but in case of error in ResultWorkers + if(px.empty()){ + // we should never find the pipeline empty! + // we call retreive once for each insert, somehow we've got out of sync!! + // FIXME log error, somehow we need to undo this mess. + // maybe it's best we do keep those query_ids after all...? 
+ } + query.result = px.retrieve().second; + // technically this returns a pair of {query_id, result} + // TODO for safety we could ensure the id's match... + ++(*m_args->m_data->n_readquery_submissions); + // FIXME technically we should decrement this if we throw anywhere as the whole lot gets rolled back? + } catch (std::exception& e){ + ++(*m_args->m_data->n_readquery_submissions_failed); + } + } + + // sanity check + if(!px.empty()){ + // pipeline should be empty! somehow we've retrieved more results than we should have?? + // FIXME log error, do something + } + } + // we're done with the pipeline: close it and detach, whatever that means. + px.complete(); + + // commit the transaction. Need to do this before it goes out of scope or the whole thing will be rolled back! + // FIXME we've already copied out vesion numbers and success statuses as we went along, but if this happens + // those statuses need to be reset!!! FIXME i guess do this in fail func? + // we therefore need to throw for ANY errors to invoke this! + try { + tx.commit(); + } catch(std::exception& e){ + // oh yeaaa, the transaction might have commited, or it might not have. awesome. + // our consolation prize is a `pqxx::in_doubt_error`. + // FIXME supposedly, it is up to us to determine whether it committed or not + // perhaps by attempting to query whether the inserted records are found... 
+ } + + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers + std::unique_lock locker(m_args->m_data->query_replies_mtx); + m_args->m_data->query_replies.insert(m_args->m_data->query_replies.end(), + m_args->write_queue.begin(),m_args->write_queue.end()); + + locker = std::unique_lock(m_args->m_data->read_replies_mtx); + m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), + m_args->read_queue.begin(),m_args->read_queue.end()); + locker.unlock(); + + ++(*m_args->m_data->db_worker_job_successes); + + // return our job args to the pool + m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + + + return; +} + + diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.h b/UserTools/DatabaseWorkers/DatabaseWorkers.h new file mode 100644 index 0000000..26c58ef --- /dev/null +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.h @@ -0,0 +1,67 @@ +#ifndef DatabaseWorkers_H +#define DatabaseWorkers_H + +#include + +#include "Tool.h" +#include "DataModel.h" +#include "WorkerPoolManager.h" + + +/** +* \class DatabaseWorkers +* +* This Tool manages a pool of workers, each with a connection to the backend database, to run the queries. +* +* $Author: M. 
O'Flaherty $ +* $Date: 2025/12/08 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +struct DatabaseJobDistributor_args : Thread_args { + DataModel* m_data; + Postgres* m_database; + JobQueue* job_queue = nullptr; + Pool job_struct_pool; + +}; + +struct DatabaseJobStruct { + + DatabaseJobStruct(Pool* pool, DataModel* data) : m_data(data), m_pool(pool){}; + DataModel* m_data; + Pool* m_pool; + + std::string connection_string; + std::vector*> local_multicast_queue; + + std::vector read_queue; + std::vector write_queue; + std::vector logging_queue; + std::vector monitoring_queue; + std::vector rootplot_queue; + std::vector plotlyplot_queue; + +}; + +class DatabaseWorkers: public Tool { + + public: + DatabaseWorkers(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resources. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose. + bool Finalise(); ///< Finalise function used to clean up resources. + + private: + static void Thread(Thread_args* args); + DatabaseJobDistributor_args thread_args; + + WorkerPoolManager* job_manager=nullptr; ///< manager for worker farm, has internal background thread that spawns new jobs and or prunes them, along with tracking statistics + JobQueue database_jobqueue; ///< job queue for worker farm + + static void DatabaseJob(void*& arg); + static void DatabaseJobFail(void*& args); + +}; + + +#endif diff --git a/UserTools/DatabaseWorkers/README.md b/UserTools/DatabaseWorkers/README.md new file mode 100644 index 0000000..a737d86 --- /dev/null +++ b/UserTools/DatabaseWorkers/README.md @@ -0,0 +1,19 @@ +# DatabaseWorkers + +DatabaseWorkers + +## Data + +Describe any data formats DatabaseWorkers creates, destroys, changes, analyzes, or its usage.
+ + + + +## Configuration + +Describe any configuration variables for DatabaseWorkers. + +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/Factory/Factory.cpp b/UserTools/Factory/Factory.cpp index eb8e90d..4cf7d00 100644 --- a/UserTools/Factory/Factory.cpp +++ b/UserTools/Factory/Factory.cpp @@ -5,5 +5,17 @@ Tool* ret=0; // if (tool=="Type") tool=new Type; if (tool=="DummyTool") ret=new DummyTool; +if (tool=="MulticastReceiverSender") ret=new MulticastReceiverSender; +if (tool=="MulticastWorkers") ret=new MulticastWorkers; +if (tool=="DatabaseWorkers") ret=new DatabaseWorkers; +if (tool=="QueueTrimmer") ret=new QueueTrimmer; +if (tool=="WriteQueryReceiver") ret=new WriteQueryReceiver; +if (tool=="ReadQueryReceiverReplySender") ret=new ReadQueryReceiverReplySender; +if (tool=="WriteWorkers") ret=new WriteWorkers; +if (tool=="MiddlemanNegotiate") ret=new MiddlemanNegotiate; +if (tool=="Monitoring") ret=new Monitoring; +if (tool=="SocketManager") ret=new SocketManager; +if (tool=="ResultWorkers") ret=new ResultWorkers; +if (tool=="JobManager") ret=new JobManager; return ret; } diff --git a/UserTools/JobManager/JobManager.cpp b/UserTools/JobManager/JobManager.cpp new file mode 100644 index 0000000..d326b7a --- /dev/null +++ b/UserTools/JobManager/JobManager.cpp @@ -0,0 +1,71 @@ +#include "JobManager.h" + +JobManager::JobManager():Tool(){} + + +bool JobManager::Initialise(std::string configfile, DataModel &data){ + + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); + //m_variables.Print(); + + // FIXME add to other Tools + LoadConfig(); + + m_data->num_threads=0; // tracker + worker_pool_manager= new WorkerPoolManager(m_data->job_queue, &m_thread_cap, &(m_data->thread_cap), &(m_data->num_threads), nullptr, self_serving); + + // FIXME add to other Tools + ExportConfiguration(); + + return true; +} + + +bool JobManager::Execute(){ + + // TODO add this to other Tools?
+ if(m_data->change_config){ + InitialiseConfiguration(m_configfile); + LoadConfig(); + ExportConfiguration(); + } + + /* TODO + m_data->monitoring_store_mtx.lock(); + m_data->monitoring_store.Set("pool_threads",worker_pool_manager->NumThreads()); + m_data->monitoring_store.Set("queued_jobs",m_data->job_queue.size()); + m_data->monitoring_store_mtx.unlock(); + // printf("jobmanager q:t = %d:%d\n", m_data->job_queue.size(), worker_pool_manager->NumThreads()); + usleep(1000); + sleep(5); + worker_pool_manager->PrintStats(); + printf("buffersize %u\n", m_data->aggrigation_buffer.size()); + if(worker_pool_manager->NumThreads()==m_thread_cap) m_data->services->SendLog("Warning: Worker Pool Threads Maxed" , 0); //make this a warning + std::cout<<"globalThreads="<num_threads<num_threads--; + + return true; +} + + +// FIXME add to other Tools +void JobManager::LoadConfig(){ + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + if(!m_variables.Get("thread_cap",m_thread_cap)) m_thread_cap = double(std::thread::hardware_concurrency())*0.8; + if(!m_variables.Get("global_thread_cap",m_data->thread_cap)) m_data->thread_cap = m_thread_cap; + if(!m_variables.Get("self_serving", self_serving)) self_serving = true; + return; +} + diff --git a/UserTools/JobManager/JobManager.h b/UserTools/JobManager/JobManager.h new file mode 100644 index 0000000..34ab539 --- /dev/null +++ b/UserTools/JobManager/JobManager.h @@ -0,0 +1,39 @@ +#ifndef JobManager_H +#define JobManager_H + +#include + +#include "Tool.h" +#include "DataModel.h" +#include "WorkerPoolManager.h" + +/** +* \class JobManager +* +* This Tool instantiates a WorkerPoolManager to manage the number of worker threads for processing multicast messages, write queries and responses.
+* +* $Author: Marcus O'Flaherty $ +* $Date: 2025/12/10 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +class JobManager: public Tool { + + public: + JobManager(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to purform Tool purpose. + bool Finalise(); ///< Finalise funciton used to clean up resources. + + private: + bool self_serving; + unsigned int m_thread_cap; + WorkerPoolManager* worker_pool_manager; + + std::string m_configfile; + void LoadConfig(); + +}; + + +#endif diff --git a/UserTools/JobManager/README.md b/UserTools/JobManager/README.md new file mode 100644 index 0000000..2aca3dd --- /dev/null +++ b/UserTools/JobManager/README.md @@ -0,0 +1,19 @@ +# JobManager + +JobManager + +## Data + +Describe any data formats JobManager creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for JobManager. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/Monitoring/Monitoring.cpp b/UserTools/Monitoring/Monitoring.cpp new file mode 100644 index 0000000..f04d536 --- /dev/null +++ b/UserTools/Monitoring/Monitoring.cpp @@ -0,0 +1,241 @@ +#include "Monitoring.h" + +Monitoring::Monitoring():Tool(){} + + +bool Monitoring::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + + // how often to write out monitoring stats + int monitoring_ms = 60000; + m_variables.Get("monitoring_period_ms",monitoring_ms); + + thread_args.monitoring_period_ms = std::chrono::milliseconds{monitoring_ms}; + thread_args.last_send = std::chrononow(); + m_data->utils.CreateThread("monitoring", &Thread, &thread_args); // thread needs a unique name + m_data->num_threads++; + + //m_data->services->AddService("middleman", 5000); // is this needed? what for?? + + return true; +} + + +bool Monitoring::Execute(){ + + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. + ++(m_data->monitoring_thread_crashes); + } + + return true; +} + + +bool Monitoring::Finalise(){ + + return true; +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +bool Monitoring::Thread(){ + + if((m_args->last_send - std::chrononow()) > monitoring_period_ms){ + + // to calculate rates we need to know the difference in number + // of reads/writes since last time. 
So get the last values + unsigned long last_write_query_count; + unsigned long last_read_query_count; + unsigned long last_log_count; + unsigned long last_mon_count; + MonitoringStore.Get("write_queries_recvd", last_write_query_count); + MonitoringStore.Get("read_queries_recvd", last_read_query_count); + MonitoringStore.Get("logs_recvd", last_log_count); + MonitoringStore.Get("mons_recvd", last_mon_count); + + // calculate rates are per minute + elapsed_time = boost::posix_time::microsec_clock::universal_time() - last_stats_calc; + + float read_query_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((read_queries_recvd - last_read_query_count) * 60.) / elapsed_time.total_seconds(); + float write_query_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((write_queries_recvd - last_write_query_count) * 60.) / elapsed_time.total_seconds(); + float log_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((logs_recvd - last_log_count) * 60.) / elapsed_time.total_seconds(); + float mon_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((mons_recvd - last_mon_count) * 60.) / elapsed_time.total_seconds(); + + // dump all stats into a Store. 
+ MonitoringStore.Set("min_loop_time",min_loop_ms); + MonitoringStore.Set("max_loop_time",max_loop_ms); + MonitoringStore.Set("loops",loops); + MonitoringStore.Set("loop_rate [Hz]",loops/elapsed_time.total_seconds()); + MonitoringStore.Set("write_queries_waiting",wrt_txn_queue.size()); + MonitoringStore.Set("read_queries_waiting",rd_txn_queue.size()); + MonitoringStore.Set("replies_waiting",resp_queue.size()); + MonitoringStore.Set("incoming_logs_waiting",in_log_queue.size()); + MonitoringStore.Set("incoming_mons_waiting",in_mon_queue.size()); + MonitoringStore.Set("out_multicasts_waiting",out_multicast_queue.size()); + MonitoringStore.Set("cached_queries",cache.size()); + MonitoringStore.Set("write_queries_recvd", write_queries_recvd); + MonitoringStore.Set("write_query_recv_fails", write_query_recv_fails); + MonitoringStore.Set("read_queries_recvd", read_queries_recvd); + MonitoringStore.Set("read_query_recv_fails", read_query_recv_fails); + MonitoringStore.Set("logs_recvd", logs_recvd.load()); + MonitoringStore.Set("mons_recvd", mons_recvd.load()); + MonitoringStore.Set("log_recv_fails", log_recv_fails.load()); + MonitoringStore.Set("mon_recv_fails", mon_recv_fails.load()); + MonitoringStore.Set("mm_broadcasts_recvd", mm_broadcasts_recvd); + MonitoringStore.Set("mm_broadcast_recv_fails", mm_broadcast_recv_fails); + MonitoringStore.Set("write_queries_failed", write_queries_failed); + MonitoringStore.Set("log_queries_failed", log_queries_failed.load()); + MonitoringStore.Set("mon_queries_failed", mon_queries_failed.load()); + MonitoringStore.Set("read_queries_failed", read_queries_failed); + MonitoringStore.Set("reps_sent", reps_sent); + MonitoringStore.Set("rep_send_fails", rep_send_fails); + MonitoringStore.Set("multicasts_sent", multicasts_sent); + MonitoringStore.Set("multicast_send_fails", multicast_send_fails); + MonitoringStore.Set("mm_broadcasts_sent", mm_broadcasts_sent); + MonitoringStore.Set("mm_broadcasts_failed", mm_broadcasts_failed); + 
MonitoringStore.Set("master_clashes", master_clashes); + MonitoringStore.Set("master_clashes_failed", master_clashes_failed); + MonitoringStore.Set("standby_clashes", standby_clashes); + MonitoringStore.Set("standby_clashes_failed", standby_clashes_failed); + MonitoringStore.Set("self_promotions", self_promotions); + MonitoringStore.Set("self_promotions_failed", self_promotions_failed); + MonitoringStore.Set("promotions", promotions); + MonitoringStore.Set("promotions_failed", promotions_failed); + MonitoringStore.Set("demotions", demotions); + MonitoringStore.Set("demotions_failed", demotions_failed); + MonitoringStore.Set("dropped_writes", dropped_writes); + MonitoringStore.Set("dropped_reads", dropped_reads); + MonitoringStore.Set("dropped_resps", dropped_resps); + MonitoringStore.Set("dropped_log_in", dropped_log_in); + MonitoringStore.Set("dropped_mon_in", dropped_mon_in); + MonitoringStore.Set("dropped_logs_out", dropped_logs_out); + MonitoringStore.Set("dropped_monitoring_out", dropped_monitoring_out); + MonitoringStore.Set("read_query_rate", read_query_rate); + MonitoringStore.Set("write_query_rate", write_query_rate); + + // convert Store into a json + std::string json_stats; + MonitoringStore >> json_stats; + + // update the web page status + // actually, this only supports a single word, with no spaces? + std::stringstream status; + status << " read qrys (rcvd/rcv errs/qry errs):["<SetValue(status.str()); + + /* + // temporarily bypass the database logging level to ensure it gets sent to the monitoring db. 
+ int db_verbosity_tmp = db_verbosity; + db_verbosity = 10; + Log(Concat("Monitoring Stats:",json_stats),15); + db_verbosity = db_verbosity_tmp; + */ + + /* + std::string sql_qry = "INSERT INTO monitoring ( time, device, subject, data ) VALUES ( 'now()', '" + + my_id+"','stats','"+json_stats+"' );"; + */ + + std::string multicast_msg = "{ \"topic\":\"monitoring\"" + ", \"subject\":\"stats\"" + ", \"device\":\""+escape_json(my_id)+"\"" + + ", \"time\":"+std::to_string(time(nullptr)*1000) // ms since unix epoch + + ", \"data\":\""+json_stats+"\" }"; + + if(am_master){ + in_mon_queue_mtx.lock(); + in_mon_queue.push_back(multicast_msg); + in_mon_queue_mtx.unlock(); + } else { + out_multicast_queue.push_back(multicast_msg); // FIXME FIXME FIXME needs to go to mon port + } + + // reset counters + last_send = std::chrononow(); + + min_loop_ms=9999999; + max_loop_ms=0; + loops=0; + + } + + return true; +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +bool ReceiveSQL::ResetStats(bool reset){ + if(!reset) return true; + + min_loop_ms=0; + max_loop_ms=0; + loops=0; + write_queries_recvd=0; + write_query_recv_fails=0; + read_queries_recvd=0; + read_query_recv_fails=0; + logs_recvd=0; + mons_recvd=0; + log_recv_fails=0; + mon_recv_fails=0; + mm_broadcasts_recvd=0; + mm_broadcast_recv_fails=0; + write_queries_failed=0; + log_queries_failed=0; + mon_queries_failed=0; + read_queries_failed=0; + reps_sent=0; + rep_send_fails=0; + multicasts_sent=0; + multicast_send_fails=0; + mm_broadcasts_sent=0; + mm_broadcasts_failed=0; + master_clashes=0; + master_clashes_failed=0; + standby_clashes=0; + standby_clashes_failed=0; + self_promotions=0; + self_promotions_failed=0; + promotions=0; + promotions_failed=0; + demotions=0; + demotions_failed=0; + dropped_writes=0; + dropped_reads=0; + dropped_resps=0; + dropped_log_in=0; + dropped_mon_in=0; + dropped_logs_out=0; + dropped_monitoring_out=0; + + MonitoringStore.Set("write_queries_recvd", 0); + 
MonitoringStore.Set("read_queries_recvd", 0); + + last_stats_calc = boost::posix_time::microsec_clock::universal_time(); + std::string timestring; + TimeStringFromUnixSec(0, timestring); + SC_vars["ResetStats"]->SetValue(false); + + return true; +} diff --git a/UserTools/Monitoring/Monitoring.h b/UserTools/Monitoring/Monitoring.h new file mode 100644 index 0000000..195e6cc --- /dev/null +++ b/UserTools/Monitoring/Monitoring.h @@ -0,0 +1,44 @@ +#ifndef Monitoring_H +#define Monitoring_H + +#include +#include +#include +#include + +#include "Tool.h" + + +/** +* \class Monitoring +* +* This Tool sends out statistics to assist with performance monitoring and debugging +* +* $Author: Marcus O'Flaherty $ +* $Date: 2025/12/11 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +struct PubReceiver_args : public Thread_args { + + DataModel* m_data; + std::chrono::time_point last_send; + std::chrono::milliseconds monitoring_period_ms; + std::stringstream ss; + +} + +class Monitoring: public Tool { + public: + Monitoring(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose. + bool Finalise(); ///< Finalise function used to clean up resources. + + private: + Thread_args thread_args; + +}; + + +#endif diff --git a/UserTools/Monitoring/README.md b/UserTools/Monitoring/README.md new file mode 100644 index 0000000..ff96ec0 --- /dev/null +++ b/UserTools/Monitoring/README.md @@ -0,0 +1,19 @@ +# TrackStats + +TrackStats + +## Data + +Describe any data formats TrackStats creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for TrackStats. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp new file mode 100644 index 0000000..35cf6f5 --- /dev/null +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -0,0 +1,331 @@ +#include "MulticastReceiverSender.h" + +MulticastReceiverSender::MulticastReceiverSender():Tool(){} + + +bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + /* ----------------------------------------- */ + /* Configuration */ + /* ----------------------------------------- */ + + m_verbose=1; + std::string type_str; // "logging" or "monitoring" + int port = 5000; // shared with service discovery, logging and monitoring + std::string multicast_address; // separate for each + // FIXME slow controls to vary them + int local_buffer_size = 100; + int transfer_ms = 1000; + int poll_timeout_ms = 100; + + m_variables.Get("type",type_str); + if(type_str!="logging" && type_str!="monitoring"){ + Log(m_tool_name+": invalid port type '"+type_str+"'; valid values are 'logging' and 'monitoring'",v_error); + return false; + } + m_variables.Get("verbose",m_verbose); + m_variables.Get("port",port); + if(!m_variables.Get("multicast_address",multicast_address)){ + if(type_str=="logging") multicast_address = "239.192.1.2"; + else multicast_address = "239.192.1.3"; + } + // buffer received messages in a local vector until size exceeds local_buffer_size... + m_variables.Get("local_buffer_size",local_buffer_size); + // ... 
or time since last transfer exceeds transfer_ms + m_variables.Get("transfer_ms",transfer_ms); + m_variables.Get("poll_timeout_ms",poll_timeout_ms); + + /* ----------------------------------------- */ + /* Socket Setup */ + /* ----------------------------------------- */ + + int socket = socket(AF_INET, SOCK_DGRAM, 0); + if(socket<=0){ + Log(m_tool_name+": Failed to open multicast socket with error "+strerror(errno),v_error); + return false; + } + + // set linger options - do not linger, discard queued messages on socket close + struct linger l; + l.l_onoff = 0; // whether to linger + l.l_linger = 0; // seconds to linger for + get_ok = setsockopt(socket, SOL_SOCKET, SO_LINGER, (char*) &l, sizeof(l)); + if(get_ok!=0){ + Log(m_tool_name+": Failed to set multicast socket linger with error "+strerror(errno),v_error); + return false; + } + + // disable blocking connections to this ip+port fomr TIME_WAIT after closure. + // this is intended to prevent delivery of delayed packets to the wrong application, + // but means a new middleman instance won't be able to bind for 30-120 seconds after another closes. 
+ int a =1; + get_ok = setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, &a, sizeof(a)); + if(get_ok!=0){ + Log(m_tool_name+": Failed to set multicast socket reuseaddr with error "+strerror(errno),v_error); + return false; + } + + // set the socket to non-blocking mode - should be irrelevant as we poll + get_ok = fcntl(socket, F_SETFL, O_NONBLOCK); + if(get_ok!=0){ + Log(m_tool_name+": Failed to set multicast socket to non-blocking with error "+strerror(errno),v_warning); + } + + // format destination address from IP string + struct sockaddr_in addr; + bzero((char *)&addr, sizeof(addr)); // init to 0 + addr.sin_family = AF_INET; + addr.sin_port = htons(port); + + // sending: which multicast group to send to + get_ok = inet_aton(multicast_address.c_str(), &addr.sin_addr); + if(get_ok==0){ // returns 0 if invalid, unlike other functions + Log(m_tool_name+": Bad multicast address '"+multicast_address+"'",v_error); + return false; + } + + // used in sendto / recvfrom methods + socklen_t addrlen = sizeof(addr); + + /* FIXME FIXME FIXME + // for two-way comms, we should bind to INADDR_ANY, not a specific multicast address.... maybe? 
+ struct sockaddr_in multicast_addr2; + bzero((char *)&multicast_addr2, sizeof(multicast_addr2)); // init to 0 + multicast_addr2.sin_family = AF_INET; + multicast_addr2.sin_port = htons(log_port); + multicast_addr2.sin_addr.s_addr = htonl(INADDR_ANY); << like this + */ + + // to listen we need to bind to the socket + get_ok = (bind(socket, (struct sockaddr*)&addr, addrlen) == 0); + if(!get_ok) { + Log(m_tool_name+": Failed to bind to multicast listen socket",v_error); + return false; + } + + // and join a multicast group + struct ip_mreq mreq; + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + get_ok = inet_aton(multicast_address.c_str(), &mreq.imr_multiaddr); + if(get_ok==0){ + Log(m_tool_name+": Bad multicast group '"+multicast_address+"'",v_error); + return false; + } + get_ok = setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); + if(get_ok!=0){ + Log(m_tool_name+": Failed to join multicast group",v_error); + return false; + } + + /* ----------------------------------------- */ + /* Thread Setup */ + /* ----------------------------------------- */ + + thread_args.m_data = m_data; + thread_args.socket = socket; + thread_args.addr = addr; + thread_args.addrlen = addrlen; + thread_args.poll = zmq::pollitem_t{NULL, socket, ZMQ_POLLIN, 0}; + thread_args.poll_timeout_ms = poll_timeout_ms; + thread_args.local_buffer_size = local_buffer_size; + thread_args.in_local_queue = m_data->multicast_buffer_pool.GetNew(local_buffer_size); + thread_args.last_transfer = std::chrononow(); + thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_ms}; + thread_args.in_queue = &m_data->in_multicast_msg_queue; + thread_args.in_queue_mtx = &m_data->in_multicast_msg_queue_mtx; + if(type_str=="logging"){ + // TODO encapsulate these in a socket-receive monitoring struct, w/ method for turning to json + // can be shared across multicast and both zmq socket receivers + { + thread_args.polls_failed = &m_data->log_polls_failed; + thread_args.msgs_rcvd = 
&m_data->logs_recvd; + thread_args.rcv_fails = &m_data->log_recv_fails; + thread_args.in_buffer_transfers = &m_data->log_in_buffer_transfers; + thread_args.out_buffer_transfers = &m_data->log_out_buffer_transfers; + } + + thread_args.out_queue = &m_data->out_log_msg_queue; + thread_args.out_queue_mtx = &m_data->out_log_msg_queue_mtx; + + thread_crashes = &m_data->log_thread_crashes; + } else { + { + thread_args.polls_failed = &m_data->mon_polls_failed; + thread_args.msgs_rcvd = &m_data->mons_recvd; + thread_args.rcv_fails = &m_data->mon_recv_fails; + thread_args.in_buffer_transfers = &m_data->mon_in_buffer_transfers; + thread_args.out_buffer_transfers = &m_data->mon_out_buffer_transfers; + } + + thread_args.out_queue = &m_data->out_mon_msg_queue; + thread_args.out_queue_mtx = &m_data->out_mon_msg_queue_mtx; + + thread_crashes = &m_data->mon_thread_crashes; + } + type_str+="_sendreceiver"; // thread needs a unique name + m_data->utils.CreateThread(type_str, &Thread, &thread_args); + m_data->num_threads++; + + return true; +} + + +bool MulticastReceiverSender::Execute(){ + + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. 
+ ++(*thread_crashes); + } + + return true; +} + + +bool MulticastReceiverSender::Finalise(){ + + // signal background receiver thread to stop + Log(m_tool_name+": Joining receiver thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + std::unique_lock locker(m_data->in_multicast_msg_queue_mtx); + m_data->in_multicast_msg_queue->clear(); + locker.unlock(); + + if(type_str=="logging"){ + locker = std::unique_lock(m_data->out_log_msg_queue_mtx); + m_data->out_log_msg_queue->clear(); + } else { + locker = std::unique_lock(m_data->out_mon_msg_queue_mtx); + m_data->out_mon_msg_queue->clear(); + } + + get_ok = close(socket); + if(get_ok!=0){ + Log(m_tool_name+": Error closing socket "+strerror(errno),v_error); + return false; + } + + return true; +} + +void ReceiveSQL::Thread(Thread_args* arg){ + + MulticastReceive_args* m_args=reinterpret_cast(arg); + DataModel* m_data = m_args->m_data; + + // transfer to datamodel + // ===================== + if(!m_args->in_local_queue->empty() && + ((m_args->in_local_queue->size()>m_args->local_buffer_size) || + (m_args->last_transfer - std::chrononow()) > transfer_period_ms) ){ + + std::unique_lock locker(m_args->in_queue_mtx); + m_args->in_queue->push_back(m_args->in_local_queue); + locker.unlock(); + + m_args->in_local_queue = m_data->multicast_buffer_pool.GetNew(local_buffer_size); + + m_args->Log(m_tool_name+": added "+std::to_string(m_args->in_local_queue.size()) + +" messages to datamodel",5); // FIXME streamline + m_args->last_transfer = std::chrononow(); + ++(*m_args->in_buffer_transfers); + } + + + // poll + // ==== + try { + get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); + } catch(zmq::error_t& err){ + // ignore poll aborting due to signals + if(zmq_errno()==EINTR) return; + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? 
+ ++(*m_args->polls_failed); + return; + } + catch(...){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->polls_failed); + return; + } + if(get_ok<0){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? + ++(*m_args->polls_failed); + return; + } + + // read + // ==== + if(m_args->poll.revents & ZMQ_POLLIN){ + m_data->Log(m_tool_name+": reading multicast message",10); // FIXME streamline + + // read the messge FIXME name max num bytes in multicast message + m_args->get_ok = recvfrom(m_args->socket, m_args->message, 655355, 0, &m_args->addr, &m_args->addrlen); + if(m_args->get_ok <= 0){ + ++(*m_args->rcv_fails); + // FIXME better logging + std::cerr<addr->sin_addr)} // FIXME is this valid on failure? + <<" with error "<msgs_rcvd); + m_data->Log(m_tool_name+": Received multicast message '"+std::string(m_args->message) + +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); // FIXME streamline + + m_args->in_local_queue->emplace_back(m_args->message); + + } + } + + // write + // ===== + if(!m_args->out_local_queue.empty()){ + + // Get the message + std::string& message = out_local_queue.front(); + + // send it + int cnt = sendto(m_args->socket, message.c_str(), message.length()+1, 0, &m_args->addr, m_args->addrlen); + + // check success + if(cnt < 0){ + m_data->Log(m_tool_name+": Error sending multicast message: "+strerror(errno),v_error); // FIXME ensure this isn't circular + m_args->out_local_queue.pop_front(); // FIXME discard it anyway? or maybe don't until it succeeds? 
+ ++(*m_args->send_fails); + + } else { + m_args->out_local_queue.pop_front(); + ++(*m_args->msgs_sent); + + } + + } else { + + // else see if there are any in datamodel to grab + std::unique_lock locker(m_args->out_queue_mtx); + if(!m_args->out_queue->empty()){ + std::swap(m_args->out_queue, m_args->out_local_queue); + ++(*m_args->out_buffer_transfers); + } + locker.unlock(); + + } + + return; +} diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.h b/UserTools/MulticastReceiverSender/MulticastReceiverSender.h new file mode 100644 index 0000000..b575371 --- /dev/null +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.h @@ -0,0 +1,68 @@ +#ifndef MulticastReceiverSender_H +#define MulticastReceiverSender_H + +#include +#include +#include +// multicast +#include +#include +#include +#include +#include + +#include "Tool.h" +#include "DataModel.h" + +/** + * \class MulticastReceiverSender + * + * This Tool receives and sends logging or monitoring (multicast) messages via a thread, pushing them to/pulling them from the DataModel. + * + * $Author: M. 
O'Flaherty $ + * $Date: 2025/11/26 $ + * Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +// class for things passed to multicast listener thread +struct MulticastReceive_args : public Thread_args { + + DataModel* m_data; + socklen_t addrlen; + struct sockaddr_in addr; + int socket; + int poll_timeout_ms; + zmq::pollitem_t poll; + char message[655355]; // theoretical maximum UDP buffer size - size also hard-coded in thread + int get_ok; + size_t local_buffer_size; + std::vector in_local_queue; + std::vector out_local_queue; + + std::vector* in_queue; + std::mutex in_queue_mtx; + std::deque* out_queue; + std::mutex out_queue_mtx; + + std::chrono::time_point last_transfer; + std::chrono::milliseconds transfer_period_ms; + +}; + +class MulticastReceiverSender: public Tool { + + public: + MulticastReceiverSender(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Executre function used to perform Tool perpose. + bool Finalise(); ///< Finalise funciton used to clean up resorces. + + private: + static void Thread(Thread_args* args); + MulticastReceive_args thread_args; + + int get_ok; /// FIXME check usage + +}; + +#endif diff --git a/UserTools/MulticastReceiverSender/README.md b/UserTools/MulticastReceiverSender/README.md new file mode 100644 index 0000000..e410e74 --- /dev/null +++ b/UserTools/MulticastReceiverSender/README.md @@ -0,0 +1,19 @@ +# MulticastReceiver + +MulticastReceiver + +## Data + +Describe any data formats MulticastReceiver creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for MulticastReceiver. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp b/UserTools/MulticastWorkers/MulticastWorkers.cpp new file mode 100644 index 0000000..97ca46a --- /dev/null +++ b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -0,0 +1,390 @@ +#include "MulticastWorkers.h" + +MulticastWorkers::MulticastWorkers():Tool(){} + + +bool MulticastWorkers::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + // allocate ehhh 60% of the CPU to multicast workers + int max_workers= (double(std::thread::hardware_concurrency())*0.6); + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; +// m_variables.Get("max_workers",max_workers); + + // potentially we will have a dedicated worker pool for multicast, but for now, + // just one created and managed by JobManager Tool + //job_manager = new WorkerPoolManager(multicast_jobs, &max_workers, 0, 0, 0, true, true); + + thread_args.m_data = m_data; + m_data->utils.CreateThread("multicast_job_distributor", &Thread, &thread_args); // thread needs a unique name + m_data->num_threads++; + + return true; +} + +bool MulticastWorkers::Execute(){ + + // FIXME ok but actually this kills all our jobs, not just our job distributor + // so we don't want to do that. + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++m_data->multicast_job_distributor_thread_crashes; + } + + return true; +} + +bool MulticastWorkers::Finalise(){ + + // signal job distributor thread to stop + Log(m_tool_name+": Joining receiver thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + // this will invoke kill on the WorkerPoolManager thread creating worker threads, as well as all workers. + //delete job_manager; + + return true; +} + + +void MulticastWorkers::Thread(Thread_args* args){ + + MulticastJobDistributor_args* m_args = reinterpret_cast(args); + m_args->local_msg_queue.clear(); + + // grab any batches of logging/monitoring messages + std::unique_lock locker(m_args->m_data->in_multicast_msg_queue_mtx); + if(!m_args->m_data->in_multicast_msg_queue.empty()){ + std::swap(m_args->m_data->in_multicast_msg_queue, m_args->local_msg_queue); + } + locker.unlock(); + + // add a job for each batch to the queue + for(int i=0; ilocal_msg_queue.size(); ++i){ + + // add a new Job to the job queue to process this data + Job* the_job = m_args->job_pool.GetNew(&m_args->m_data->job_pool, "multicast_worker"); + if(the_job->data == nullptr){ + // on first creation of the job, make it a JobStruct to encapsulate its data + // N.B. 
Pool::GetNew will only invoke the constructor if this is a new instance, + // (not if it's been used before and then returned to the pool) + // so don't pass job-specific variables to the constructor + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); + } else { + // this should never happen as jobs should return their args to the pool + std::cerr<<"Multicast Job with non-null data pointer!"<(the_job->data); + job_data->msg_buffer = m_args->local_msg_queue[i]; + + the_job->func = MulticastMessageJob; + the_job->fail_func = MulticastMessageFail; + + //multicast_jobs.AddJob(the_job); + m_data->job_queue.AddJob(the_job); + + } + + return true; +} + + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void MulticastWorkers::MulticastMessageFail(void*& arg){ + + // safety check in case the job somehow fails after returning its args to the pool + if(arg==nullptr){ + return; // FIXME log this occurrence? + } + + MulticastJobStruct* m_args=reinterpret_cast(arg); + ++(*m_args->m_data->multicast_worker_job_fails); + + // return the vector of string buffers to the pool for re-use by MulticastReceiverSender Tool + m_args->msg_buffer->clear(); + m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); + + // return our job args to the pool + m_args->m_pool.Add(m_args); + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + + // FIXME do something here + // we could also try to insert the buffers into the queues for downstream, + // if there were preceding messages that were succesfully added. + // but we don't know where we failed, so that could be risky. + // we could keep track of where we were in m_args and: + // 1. log the specific message we were trying to process when the job failed + // 2. submit the data we already have + // 3. 
make a new job for the remaining data + +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +// Each job takes a vector of messages and converts them into a suitable object, +// then locks and inserts that into a datamodel vector for the database workers +void MulticastWorkers::MulticastMessageJob(void*& arg){ + + MulticastJobStruct* m_args=reinterpret_cast(arg); + + // most efficient way to do insertion would seem to be via jsonb_to_recordset, which allows batching queries, + // query optimisation similar to 'unnest', and avoids the overhead of parsing the JSON: e.g. + // psql -c "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM + // jsonb_to_recordset('[ {\"time\":\"2025-12-01 12:31\", \"device\":\"dev1\", \"severity\":1, \"message\":\"blah\"}, + // {\"time\":\"2025-12-02 15:25\", \"device\":\"dev2\", \"severity\":2, \"message\":\"arg\"} ]') + // as t(time timestamptz, device text, severity int, message text);" << (this part is needed) + + // or: + // PREPARE loginsert ( text ) AS INSERT INTO logging ( time, device, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, severity int, message text); + // then: + // execute loginsert('[ {"time":"2025-12-01 12:31", "device":"dev1", "severity":1, "message":"blah"}, {"time":"2025-12-02 15:25", "device":"dev2", "severity":2, "message":"oooh"} ]'); + + // subsequently, all we need to do here is concatenate the JSONs + + m_args->logging_buffer = "["; + m_args->monitoring_buffer = "["; + m_args->rootplot_buffer = "["; + m_args->plotlyplot_buffer = "["; + + // loop over messages + for(std::string& next_msg : *m_args->msg_buffer){ + + // we can't batch insertions destined for different tables, + // so keep each message type (topic) in a different buffer. 
+ // the Services class always puts the topic first, + // and all topics start with a unique character (XXX for now?), + // so we don't need to parse the message to identify the topic: + if(next_msg.substr(0,9)!="{\"topic\":"){ + // FIXME log it as bad multicast + continue; + } + + switch(query_topic{next_msg[10]}){ + case query_topic::logging: + m_args->out_buffer = m_args->logging_buffer; + break; + case query_topic::monitoring: + m_args->out_buffer = m_args->monitoring_buffer; + break; + case query_topic::rootplot: + m_args->out_buffer = m_args->rootplot_buffer; + break; + case query_topic::plotlyplot: + m_args->out_buffer = m_args->plotlyplot_buffer; + break; + default: + continue; // FIXME unknown topic: error log it. + } + + if(m_args->out_buffer.length()>1) m_args->out_buffer += ", "; + m_args->out_buffer += next_msg; + + ++(*m_args->m_data->n_multicasts_processed); // FIXME add split by topic + + } + + // pass into datamodel for DatabaseWorkers + if(m_args->logging_buffer.length()!=1){ + m_args->logging_buffer += "]"; + std::unique_lock locker(m_args->log_query_queue_mtx); + m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); + } + + if(m_args->monitoring_buffer.length()!=1){ + m_args->monitoring_buffer += "]"; + std::unique_lock locker(m_args->mon_query_queue_mtx); + m_args->m_data->mon_query_queue.push_back(m_args->monitoring_buffer); + } + + if(m_args->rootplot_buffer.length()!=1){ + m_args->rootplot_buffer += "]"; + std::unique_lock locker(m_args->rootplot_query_queue_mtx); + m_args->m_data->rootplot_query_queue.push_back(m_args->rootplot_buffer); + } + + if(m_args->plotlyplot_buffer.length()!=1){ + m_args->plotlyplot_buffer += "]"; + std::unique_lock locker(m_args->plotlyplot_query_queue_mtx); + m_args->m_data->plotlyplot_query_queue_mtx.push_back(m_args->plotlyplot_buffer); + } + + // return the vector of string buffers to the pool for re-use by MulticastReceiverSender Tool + m_args->msg_buffer->clear(); + 
m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); + + ++(*m_args->m_data->multicast_worker_job_successes); + + m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + + return; + +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +/* +// // used with v0 batching +// static const std::string log_base = "INSERT INTO logging ( time, device, severity, message ) VALUES "; +// static const std::string mon_base = "INSERT INTO monitoring ( time, device, subject, data ) VALUES "; + +// Each job takes a vector of messages and converts them into a suitable object, +// then locks and inserts that into a datamodel vector for the database workers +void MulticastWorkers::MulticastMessageJob(void* arg){ + + //================= + // v0: combine all multicasts into a batch sql query (curent version) + // note! supposedly cannot use this with pqxx::pipeline (see 'pqxx::pipeline::insert') + // although if we're not interested in any return values, maybe it's ok...? + + // v1: insert into pipeline - does batching for you, so maybe equivalent to v0? + // accepts a std::stringview of a query, so still need to do sanitization yourself, + // and be mindful of lifetime of the query you pass it! + + // v2: turn each multicast into a form suitable for use with pqxx::stream + // the fastest method is pqxx::stream::write_values(T...) 
which accepts a set of variables + // less preferred is write_row or operator<< both of which accept a container or tuple + + // v3: turn each multicast message into a pqxx::params object to be used with a prepared statement (pqxx::prepped) + + // v4: transpose the data and use unnest to pass multiple rows as a set of columns + // this should be close in performance to COPY (stream) + // psql -c "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM + // UNNEST(ARRAY['2025-12-01 12:31', '2025-12-02 15:23']::timestamptz[], + // ARRAY['dev1', 'dev2'], + // '{1,2}'::int[], << alternative way to define an array of ints (note bracket change) + // '{\"blah\", \"argh\"}'::text[])" << for array of strings need internal quoting + + // v5: just insert the JSON directly 5-head + // psql -c "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM + // jsonb_to_recordset('[ {\"time\":\"2025-12-01 12:31\", \"device\":\"dev1\", \"severity\":1, \"message\":\"blah\"}, + // {\"time\":\"2025-12-02 15:25\", \"device\":\"dev2\", \"severity\":2, \"message\":\"arg\"} ]') + // as t(time timestamptz, device text, severity int, message text);" << this part is needed + + PREPARE moninsert ( text ) as INSERT INTO monitoring ( time, device, subject, data ) select * from jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, subject text, data jsonb ); + execute moninsert('[ {"time":"2025-12-03 12:22", "device":"dev3", "subject":"test", "data":{"testkey":"testval", "key2":3} }, {"time":"2025-12-03 13:23", "device":"dev3", "subject":"test", "data":{"testkey":"testval2", "key2":4} } ]' ); + + //================== + + MulticastJobStruct* m_args=reinterpret_cast(arg); + + // v0: pre-populate query with base + m_args->out_buffer = m_args->query_base; + + // loop over messages, parse each into an SQL query + for(std::string& next_msg : *m_args->msg_buffer){ + +// // parse message +// thread_local MulticastMsg msg; +// msg.Clear(); +// 
if(!msg.Parse(message)){ +// Log("MulticastMessageToQuery error parsing message json '"+message+"'",v_error); // FIXME track, report +// continue; +// } + + // we can't batch insert of records into different tables + // so keep each message type (topic) in a different buffer +// if(msg.topic=="logging"){ +// m_args->out_buffer = m_args->logging_buffer; +// } else if(topic=="monitoring"){ +// m_args->out_buffer = m_args->monitoring_buffer; +// } else if(topic=="rootplot"){ +// m_args->out_buffer = m_args->rootplot_buffer; +// } else if(msg.topic=="plotlyplot"){ +// m_args->out_buffer = m_args->plotlyplot_buffer; +// } + +// // v0: concatenate to batch query +// m_args->out_buffer += msg.GetString(m_args->first_vals); // FIXME: sanitization + +// // v1: insert into pipeline +// m_args->out_buffer.insert(msg.GetString()); // FIXME lifetime of this string needs to persist.... how to do? + +// // v2: append to queue of tuples for stream // n.b. need to split into GetMonitoringTuple/GetLoggingTuple +// m_args->out_buffer->push_back(msg.GetTuple()); // because the tuple types are different +// or +// m_args->out_buffer->push_back(msg); // what's the deal here? +// // well the preferred way to use a stream is pqxx::stream_to(a, b c) for variables a,b,c +// // we implement this via Msg::StreamRow(pqxx::stream_to), but this Tool doesn't have the pqxx::stream_to +// +// // v3: append to queue of pqxx::params objects for prepared statement +// m_args->out_buffer->push_back(msg.GetParams()); + +// // v4: unnest +// m_args->out_buffer.Append(msg); +// // it's going to be some kind of struct that internally has strings for a list of timestamps, +// // device names, severities and messages. Append adds this messages' new values to each. + + ++m_args->n_queries; // FIXME make atomic, stats tracking + + } + +// // v0: terminate this batch with semicolon +// m_args->out_buffer += ";"; + + ... 
+
+}
+*/
diff --git a/UserTools/MulticastWorkers/MulticastWorkers.h b/UserTools/MulticastWorkers/MulticastWorkers.h
new file mode 100644
index 0000000..d6506c0
--- /dev/null
+++ b/UserTools/MulticastWorkers/MulticastWorkers.h
@@ -0,0 +1,62 @@
+#ifndef MulticastWorkers_H
+#define MulticastWorkers_H
+
+#include <string>
+
+#include "Tool.h"
+#include "DataModel.h"
+
+
+/**
+* \class MulticastWorkers
+*
+* This Tool uses a worker pool to process batches of multicast messages (received in JSON format), separates them based on their topic (i.e. destination table) and prepares them for insertion into the database by database workers. This preparation may include batching messages, decoding the JSON into SQL, extraction of JSON variables into parameter packs, etc. Presently, it batches the JSON for use with postgres jsonb_to_recordset.
+*
+* $Author: M. O'Flaherty $
+* $Date: 2025/12/04 $
+* Contact: marcus.o-flaherty@warwick.ac.uk
+*/
+
+// class for things passed to multicast worker threads
+struct MulticastJobStruct {
+
+ MulticastJobStruct(Pool* pool, DataModel* data) : m_pool(pool), m_data(data){};
+ Pool* m_pool;
+ DataModel* m_data;
+ std::vector<std::string>* msg_buffer;
+ std::string out_buffer;
+
+};
+
+struct MulticastJobDistributor_args : Thread_args {
+
+ DataModel* m_data;
+ std::vector<std::vector<std::string>*> local_msg_queue; // swap with datamodel and then pass out to jobs
+ Pool job_struct_pool{true, 1000, 100}; ///< pool for job objects used by worker threads
+
+};
+
+class MulticastWorkers: public Tool {
+
+ public:
+ MulticastWorkers(); ///< Simple constructor
+ bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resources. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools.
+ bool Execute(); ///< Execute function used to perform Tool purpose.
+ bool Finalise(); ///< Finalise function used to clean up resources.
+ + private: + static bool Thread(Thread_args* args); ///< job distributor thread function that pulls batches of multicast messages from upstream and passes them to the job queue + MulticastJobDistributor_args thread_args; ///< args for the child thread that produces and distributes jobs to the worker farm + + static void MulticastMessageJob(void*& arg); ///< job function that prepares a batch of multicast messages for DB entry + static void MulticastMessageFail(void*& arg); ///< job fail function, perform cleanup to return multicast buffer and job args struct to their respective Pools + + // for now use shared ones in datamodel + //WorkerPoolManager* job_manager=nullptr; ///< manager for worker farm, has internal background thread that spawns new jobs and or prunes them, along with tracking statistics + //JobQueue multicast_jobs; ///< job queue for worker farm + +}; + + + +#endif diff --git a/UserTools/MulticastWorkers/README.md b/UserTools/MulticastWorkers/README.md new file mode 100644 index 0000000..5ae3ac4 --- /dev/null +++ b/UserTools/MulticastWorkers/README.md @@ -0,0 +1,19 @@ +# MulticastWorkers + +MulticastWorkers + +## Data + +Describe any data formats MulticastWorkers creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for MulticastWorkers. + +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/ReadQueryReceiverReplySender/README.md b/UserTools/ReadQueryReceiverReplySender/README.md new file mode 100644 index 0000000..11b9561 --- /dev/null +++ b/UserTools/ReadQueryReceiverReplySender/README.md @@ -0,0 +1,19 @@ +# ReadReceiverReplySender + +ReadReceiverReplySender + +## Data + +Describe any data formats ReadReceiverReplySender creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for ReadReceiverReplySender. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp new file mode 100644 index 0000000..718a107 --- /dev/null +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -0,0 +1,326 @@ +#include "ReadReceiverReplySender.h" + +ReadReceiverReplySender::ReadReceiverReplySender():Tool(){} + +//FIXME call it readqueryreceviverandreplysender +bool ReadReceiverReplySender::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + + /* ----------------------------------------- */ + /* Configuration */ + /* ----------------------------------------- */ + + port_name = "db_read"; + // FIXME do these timeouts need to be << transfer_period_ms? + int rcv_timeout_ms=500; + int snd_timeout_ms=500; + int poll_timeout_ms=500; + int rcv_hwm=10000; + int conns_backlog=1000; // FIXME sufficient? 
+ int local_buffer_size = 200; + int transfer_period_ms = 200; + + m_variables.Get("snd_timeout",snd_timeout_ms); + m_variables.Get("rcv_timeout",rcv_timeout_ms); + m_variables.Get("poll_timeout_ms",poll_timeout_ms); + m_variables.Get("port_name", port_name); + m_variables.Get("rcv_hwm", rcv_hwm); // max num outstanding messages in receive buffer + m_variables.Get("conns_backlog", conns_backlog); // max num oustanding connection requests + m_variables.Get("local_buffer_size", local_buffer_size); + m_variables.Get("transfer_period_ms", transfer_period_ms); + + /* ----------------------------------------- */ + /* Socket Setup */ + /* ----------------------------------------- */ + + // A ROUTER socket is used for read queries as it naturally load balances + // (since read queries can be handled by both master/slave middlemen and will be round-robined between them) + // and is also used to asynchronously send both read and write query acknowledgements/replies + + ManagedSocket* managed_socket = new ManagedSocket; + managed_socket->service_name=""; // attach to any client type... + managed_socket->port_name = port_name; // ...that advertises a service on port 'port_name' + managed_socket->socket = new zmq::socket_t(*m_data->context, ZMQ_ROUTER); + managed_socket->socket->setsockopt(ZMQ_SNDTIMEO, snd_timeout_ms); + managed_socket->socket->setsockopt(ZMQ_RCVTIMEO, rcv_timeout_ms); + managed_socket->socket->setsockopt(ZMQ_RCVHWM,rcv_hwm); + managed_socket->socket->setsockopt(ZMQ_BACKLOG,conns_backlog); + managed_socket->socket->setsockopt(ZMQ_LINGER, 10); + // make reply socket error, rather than silently drop, if the destination is unreachable + managed_socket->socket->setsockopt(ZMQ_ROUTER_MANDATORY, 1); // FIXME do we want this? 
+ // make router transfer connections with an already seen ZMQ_IDENTITY to a new connection + // rather than rejecting the new connection attempt + // FIXME need to update ZMQ version to enable, but we should do this + /* + try{ + managed_socket->socket->setsockopt(ZMQ_ROUTER_HANDOVER, 1); + } catch(std::exception& e){ + std::cout<<"caught "<socket; // FIXME get from struct. + thread_args.socket_mtx = managed_socket->socket_mtx; // FIXME get from struct. For sharing socket with SocketManager + thread_args.poll_timeout_ms = poll_timeout_ms; + thread_args.polls.emplace_back(*socket,0,ZMQ_POLLIN,0); + thread_args.polls.emplace_back(*socket,0,ZMQ_POLLOUT,0); + thread_args.in_local_queue = m_data->querybatch_pool.GetNew(); + thread_args.in_local_queue.reserve(local_buffer_size); + thread_args.local_buffer_size = local_buffer_size; + thread_args.transfer_period_ms = transfer_period_ms; + + // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients + std::unique_lock locker(m_data->managed_sockets_mtx); + m_data->managed_sockets[port_name] = managed_socket; + locker.unlock(); + + m_args->in_local_queue = m_data->rdmsg_buffer_pool.GetNew(local_buffer_size); + thread_args.make_new = true; + m_data->utils.CreateThread("readrep_sendreceiver", &Thread, &thread_args); // thread needs a unique name + m_data->num_threads++; + + return true; +} + + +bool ReadReceiverReplySender::Execute(){ + + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++m_data->read_rcv_thread_crashes;
+ }
+
+ if(m_data->managed_sockets.count(port_name)){
+ std::unique_lock<std::mutex> lock(m_data->managed_sockets_mtx);
+ ManagedSocket* sock = m_data->managed_sockets[port_name];
+ m_data->managed_sockets.erase(port_name);
+ lock.unlock();
+ if(sock->socket) delete sock->socket; // destructor closes socket
+ delete sock;
+ }
+
+ return true;
+}
+
+
+bool ReadReceiverReplySender::Finalise(){
+
+ // signal background receiver thread to stop
+ Log(m_tool_name+": Joining receiver thread",v_warning);
+ m_data->utils.KillThread(&thread_args);
+ Log(m_tool_name+": Finished",v_warning);
+ m_data->num_threads--;
+
+ // FIXME ensure we don't interfere with SocketManager? Better to leave that to do deletion in its destructor?
+ /*
+ if(managed_socket->socket){
+ std::unique_lock<std::mutex> lock(managed_socket->socket_mtx);
+ delete managed_socket->socket;
+ managed_socket->socket=nullptr;
+ }
+ */
+
+ return true;
+}
+
+// ««-------------- ≪ °◇◆◇° ≫ --------------»»
+
+void ReadReceiverReplySender::Thread(Thread_args* args){
+
+ ReadReceiverReplySender_args* m_args = reinterpret_cast<ReadReceiverReplySender_args*>(args);
+
+ // transfer to datamodel
+ // =====================
+ if(m_args->in_local_queue.size() >= m_args->local_buffer_size ||
+ (std::chrono::steady_clock::now() - m_args->last_transfer) > m_args->transfer_period_ms){
+
+ if(!m_args->make_new) m_args->in_local_queue.pop_back();
+ if(!m_args->in_local_queue.empty()){
+
+ const size_t n_transferred = m_args->in_local_queue.size();
+ std::unique_lock<std::mutex> locker(m_args->m_data->read_msg_queue_mtx);
+ m_args->m_data->read_msg_queue.push_back(m_args->in_local_queue);
+ locker.unlock();
+
+ m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew();
+ m_args->in_local_queue.reserve(m_args->local_buffer_size);
+
+ m_args->m_data->Log("ReadReceiverReplySender: added "+std::to_string(n_transferred)
+ +" messages to datamodel",5); // FIXME better logging
+ m_args->last_transfer = std::chrono::steady_clock::now();
+ m_args->make_new=true;
+ ++(*m_args->m_data->readrep_in_buffer_transfers);
+
+ }
+ }
+
+ // poll
+ // ====
+ try {
+ std::unique_lock<std::mutex> lock(*m_args->socket_mtx);
+
get_ok = zmq::poll(&m_args->polls, 2, m_args->poll_timeout_ms); + } catch(zmq::error_t& err){ + // ignore poll aborting due to signals + if(zmq_errno()==EINTR) return; + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->readrep_polls_failed); + return; + } // FIXME catch non-zmq errors? can we handle them any better? + catch(...){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->readrep_polls_failed); + return; + } + if(get_ok<0){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->readrep_polls_failed); + return; + } + + // read + // ==== + if(m_args->polls[0].revents & ZMQ_POLLIN){ + m_data->Log(">>> got a read query from client",3); // FIXME better logging + + if(m_args->make_new){ + m_args->in_local_queue.emplace_back(); + m_args->make_new = false; + } + ZmqQuery& msg_buf = m_args->in_local_queue.back().queries; + msg_buf.resize(4); + // received parts are [client, topic, msg_id, query] + // reorder parts on receipt as client and msg_id will be left untouched and re-used for response + static constexpr char part_order[4] = {0,2,1,3}; + + std::unique_lock locker(m_args->socket_mtx); + for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ + m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); + if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; + } + + // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message + if(m_args->get_ok && msg_buf[3].more()){ + while(true){ + m_args->socket->recv(&m_args->msg_discard); + ++m_args->msg_parts; + } + } + locker.unlock(); + + // if the read failed, discard the message + if(!m_args->get_ok){ + std::cerr<msg_parts<m_data->readrep_rcv_fails); + return; + } + + // if there weren't 4 parts, 
discard the message
+ if(m_args->msg_parts!=4){
+ std::cerr<<"discarding "<<m_args->msg_parts<<" part message"<<std::endl;
+ ++(*m_args->m_data->readrep_bad_msgs);
+ return;
+ }
+
+ // else success
+ m_args->make_new=true;
+ ++(*m_args->m_data->readrep_msgs_rcvd);
+
+ } // else no messages from clients
+
+ // write
+ // =====
+ m_args->m_data->Log("Size of reply queue is "+
+ (m_args->out_local_queue ? std::to_string(m_args->out_local_queue->queries.size()) : std::string{"0"}),10); // FIXME
+
+ // send next response message, if we have one in the queue
+ if(m_args->out_local_queue!=nullptr && m_args->out_i < m_args->out_local_queue->queries.size()){
+
+ // check we had a listener ready
+ if(m_args->polls[1].revents & ZMQ_POLLOUT){
+
+ ZmqQuery& rep = m_args->out_local_queue->queries[m_args->out_i++];
+ // FIXME maybe don't pop (increment out_i) until send succeeds?
+ // FIXME maybe implement 'retries' mechanism as previously?
+
+ std::unique_lock<std::mutex> locker(*m_args->socket_mtx);
+ try {
+ for(size_t i=0; i+1<rep.size(); ++i){
+ m_args->get_ok = m_args->socket->send(rep[i], ZMQ_SNDMORE);
+ if(!m_args->get_ok) break;
+ }
+ if(m_args->get_ok) m_args->get_ok = m_args->socket->send(rep[rep.size()-1]);
+ } catch(zmq::error_t& e){
+ std::cerr<<"reply send error: "<<e.what()<<std::endl;
+ m_args->get_ok = false;
+ }
+ locker.unlock();
+
+ if(m_args->get_ok){
+ // remove from the to-send queue
+ ++(*m_args->m_data->readrep_reps_sent);
+ //m_args->out_local_queue->queries.pop_front(); // FIXME if we didn't do it before
+
+ } else {
+ std::cerr<<"failed to send reply"<<std::endl;
+ ++(*m_args->m_data->readrep_rep_send_fails); // FIXME or move into below if we retry? or track both?
+ /*
+ if(next_msg.retries>=max_send_attempts){
+ resp_queue.erase(resp_queue.begin()->first);
+ } else {
+ ++next_msg.retries;
+ }
+ */
+ }
+
+ } // else no available listeners
+
+ } else {
+
+ // no responses to send - see if there's any in the DataModel
+ std::unique_lock<std::mutex> locker(m_args->m_data->query_replies_mtx);
+ if(!m_args->m_data->query_replies.empty()){
+
+ // return our batch to the pool if applicable
+ if(m_args->out_local_queue!=nullptr){
+ m_args->m_data->querybatch_pool.Add(m_args->out_local_queue);
+ m_args->out_local_queue = nullptr;
+ }
+
+ // grab a new batch
+ m_args->out_local_queue = m_args->m_data->query_replies.front();
+ m_args->m_data->query_replies.pop_front();
+
+ ++(*m_args->m_data->readrep_out_buffer_transfers);
+
+ // start sending from the beginning
+ m_args->out_i=0;
+ }
+ locker.unlock();
+
+
+ }
+
+ return;
+}
diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h
new file mode 100644
index 0000000..a9f4629
--- /dev/null
+++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h
@@ -0,0 +1,63 @@
+#ifndef ReadReceiverReplySender_H
+#define ReadReceiverReplySender_H
+
+#include <string>
+
+#include "Tool.h"
+#include "DataModel.h"
+
+
+/**
+ * \class ReadReceiverReplySender
+ *
+ * This Tool gets read queries from a ZMQ ROUTER socket and sends replies as well as write query acknowledgements.
+ * + * $Author: Marcus O'Flaherty $ + * $Date: 2025/11/27 $ + * Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +struct ReadReceiverReplySender_args : public Thread_args { + + DataModel* m_data; + zmq::socket_t* socket=nullptr; + std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients + + int poll_timeout_ms; + std::vector polls; + zmq::message_t msg_discard; + bool make_new; + int msg_parts; + int get_ok; + QueryBatch* in_local_queue; + QueryBatch* out_local_queue; + size_t out_i; ///< which query in the batch is next to sent + + // for received buffer transfers + // FIXME we don't track last time of outgoing buffer transfer? + std::chrono::time_point last_transfer; + std::chrono::milliseconds transfer_period_ms; + size_t local_buffer_size; + +}; + +class ReadReceiverReplySender: public Tool { + + public: + ReadReceiverReplySender(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Executre function used to perform Tool perpose. + bool Finalise(); ///< Finalise funciton used to clean up resorces. + + private: + static void Thread(Thread_args* args); + ReadReceiverReplySender_args thread_args; + + std::string port_name; // name by which clients advertise sockets for sending read queries to the DB + +}; + + + + +#endif diff --git a/UserTools/ResultWorkers/README.md b/UserTools/ResultWorkers/README.md new file mode 100644 index 0000000..f094517 --- /dev/null +++ b/UserTools/ResultWorkers/README.md @@ -0,0 +1,19 @@ +# ResultWorkers + +ResultWorkers + +## Data + +Describe any data formats ResultWorkers creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for ResultWorkers. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp new file mode 100644 index 0000000..429572d --- /dev/null +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -0,0 +1,277 @@ +#include "ResultWorkers.h" + +ResultWorkers::ResultWorkers():Tool(){} + + +bool ResultWorkers::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + + thread_args.m_data = m_data; + m_data->utils.CreateThread("result_job_distributor", &Thread, &thread_args); + m_data->num_threads++; + + return true; +} + + +bool ResultWorkers::Execute(){ + + // FIXME ok but actually this kills all our jobs, not just our job distributor + // so we don't want to do that. + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++m_data->result_job_distributor_thread_crashes; + } + + return true; +} + + +bool ResultWorkers::Finalise(){ + + // signal job distributor thread to stop + Log(m_tool_name+": Joining receiver thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + return true; +} + + +void ResultWorkers::Thread(Thread_args* args){ + + ResultJobDistributor_args* m_args = reinterpret_cast(args); + + // grab a batch of read queries, with results awaiting conversion + std::unique_lock locker(m_args->m_data->read_replies_mtx); + if(!m_args->m_data->read_replies.empty()){ + std::swap(m_args->m_data->read_replies, m_args->local_msg_queue); + } + locker.unlock(); + + // add a job for each batch to the queue + for(int i=0; ilocal_msg_queue.size(); ++i){ + + // add a new Job to the job queue to process this data + Job* the_job = job_pool.GetNew("result_worker"); + if(the_job->data == nullptr){ + // on first creation of the job, make it a JobStruct to encapsulate its data + // N.B. Pool::GetNew will only invoke the constructor if this is a new instance, + // (not if it's been used before and then returned to the pool) + // so don't pass job-specific variables to the constructor + the_job->data = job_struct_pool.GetNew(&job_struct_pool, m_args->m_data); + } else { + // FIXME error + std::cerr<<"result_worker Job with non-null data pointer!"<func = ResultJob; + the_job->fail_func = ResultJobFail; + + ResultJobStruct* job_data = dynamic_cast(the_job->data); + job_data->batch = m_args->local_msg_queue[i]; + + /*ok =*/ m_args->m_data->job_queue.AddJob(the_job); // just checks if you've defined func and first_vals = true; + + } + + // TODO add workers that also call setstatus /setversion on batch jobs and then pass them to send thread? + // maybe we can generalise to setreply if needed, depending on reply format & batching of read queries + // or do we just do this in the connection / reply sender thread(s)? 
+ + return true; +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void ResultWorkers::ResultJobFail(void*& arg){ + + // safety check in case the job somehow fails after returning its args to the pool + if(arg==nullptr){ + return; // FIXME log this occurrence? + } + + // FIXME hmm, well, i guess we say the query failed + // - we had the results, but then lost them before sending + + ResultJobStruct* m_args=reinterpret_cast(arg); + ++(*m_args->m_data->result_worker_job_fails); + + // return our job args to the pool + m_args->m_pool.Add(m_args); + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + +} + +void ResultWorkers::ResultJob(void*& arg){ + + ResultJobStruct* m_args = reinterpret_cast(arg); + + // for now each job processes a batch, not a set of batches + //for(QueryBatch* batch : m_args->local_msg_queue){ + + // read queries need to have their results interpreted, + // write queries only need to have their insertion success status returned. + // each batch should only contain messages that are either all read or all write. 
+ if(m_args->batch->queries.front().topic()[0]=='R'){ + + // process batch of read queries + + for(ZmqQuery& query : m_args->batch->queries){ + + // set whether the query succeeded or threw an exception + query.setsuccess(uint32_t succeeded); // FIXME FILL + + // returned rows are sent back formatted as JSON, with each row a new zmq::message_t + // resize zmq vector in preparation + query.setresponserows(std::size(query.result)); + + if(query.topic()[2]!=query_topic::generic){ + + // standard queries generated by the libDAQInterface use `row_to_json` + // to request results already packaged up into one JSON per row + // so all we need to do is copy that into the zmq message + for(size_t i=0; itmpval = "{"; + for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; + m_args->tmpval += "\"" + it->name() + "\":"; + // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' + // but to convert this into JSON, strings need to be quoted: + // i.e. { "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } + // this means we need to add enclosing quotes *only* for string fields + if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ + m_args->tmpval += "\""+it->c_str()+"\""; + } else { + m_args->tmpval += it->c_str(); + } + } + m_args->tmpval += "}"; + + query.setresponse(i, m_args->tmpval); + + } + query.result.clear(); + + } + + } // loop over queries in this batch + + } else { + + // process batch of write queries + // these are interleaved but results are grouped by type + size_t devconfig_i = 0; + size_t runconfig_i = 0; + size_t calibration_i = 0; + size_t plotlyplot_i = 0; + size_t rootplot_i = 0; + bool devconfigs_ok = !m_args->batch->devconfig_version_nums.empty(); + bool runconfigs_ok = !m_args->batch->runconfig_version_nums.empty(); + bool calibrations_ok = !m_args->batch->calibration_version_nums.empty(); + bool plotlyplots_ok = !m_args->batch->plotlyplot_version_nums.empty(); + bool rootplots_ok = 
!m_args->batch->rootplot_version_nums.empty(); + + for(ZmqQuery& query : m_args->batch->queries){ + + switch(query.topic()[2]){ + // alarms return just the success status + case query_topic::alarm: + query.setsuccess(m_args->batch->alarm_batch_success); + query.setresponserows(0); + break; + + // everything else returns a version number + case query_topic::dev_config: + query.setsuccess(devconfigs_ok); + if(devconfigs_ok){ + query.setresponserows(1); + query.setresponse(0, devconfig_version_nums[devconfig_i++]); + } + break; + + case query_topic::run_config: + query.setsuccess(runconfigs_ok); + if(runconfigs_ok){ + query.setresponserows(1); + query.setresponse(0, runconfig_version_nums[runconfig_i++]); + } + break; + + case query_topic::calibration: + query.setsuccess(calibrations_ok); + if(calibrations_ok){ + query.setresponserows(1); + query.setresponse(0, calibration_version_nums[calibration_i++]); + } + break; + + case query_topic::plotlyplot: + query.setsuccess(plotlyplots_ok); + if(plotlyplots_ok){ + query.setresponserows(1); + query.setresponse(0, plotlyplot_version_nums[plotlyplot_i++]); + } + break; + + case query_topic::rootplot: + query.setsuccess(rootplots_ok); + if(rootplots_ok){ + query.setresponserows(1); + query.setresponse(0, rootplot_version_nums[rootplot_i++]); + } + break; + + default: + // FIXME corrupted topic, log it. 
+ + } + + } // loop over queries in this batch + + } // if/else on whether this batch was read/write + +// } // loop over query batches + + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers + std::unique_lock locker(m_args->m_data->query_replies_mtx); + //m_args->m_data->query_replies.insert(m_args->m_data->query_replies.end(), + // m_args->local_msg_queue.begin(),m_args->local_msg_queue.end()); + m_args->m_data->query_replies.push_back(m_args->batch); + locker.unlock(); + + ++(*m_args->m_data->result_worker_job_successes); + + // return our job args to the pool + m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + + return; +} + + diff --git a/UserTools/ResultWorkers/ResultWorkers.h b/UserTools/ResultWorkers/ResultWorkers.h new file mode 100644 index 0000000..0029cc9 --- /dev/null +++ b/UserTools/ResultWorkers/ResultWorkers.h @@ -0,0 +1,58 @@ +#ifndef ResultWorkers_H +#define ResultWorkers_H + +#include +#include + +#include "Tool.h" +#include "DataModel.h" + + +/** +* \class ResultWorkers +* +* This Tool spawns jobs that convert pqxx::result objects from read queries into zmq::message_t objects ready for sending back to clients +* +* $Author: Marcus O'Flaherty $ +* $Date: 2025/12/10 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +// class for things passed to result worker threads +struct ResultJobStruct { + + ResultJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; + DataModel* m_data; + Pool* m_pool; + QueryBatch* batch; + std::stringstream ss; + std::string tmpval; + +}; + +struct ResultJobDistributor_args : Thread_args { + + DataModel* m_data; + std::vector local_msg_queue; // swap with datamodel and then pass out to jobs + Pool job_struct_pool(true, 1000, 100); ///< pool for job args structs // FIXME default args + +}; + +class ResultWorkers: 
public Tool { + + public: + ResultWorkers(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose. + bool Finalise(); ///< Finalise funciton used to clean up resources. + + private: + static void Thread(Thread_args* args); + ResultJobDistributor_args thread_args; ///< args for the child thread that makes jobs for the job queue + + static void ResultJob(void*& arg); + static void ResultJobFail(void*& args); + +}; + +#endif diff --git a/UserTools/SocketManager/README.md b/UserTools/SocketManager/README.md new file mode 100644 index 0000000..2fc073f --- /dev/null +++ b/UserTools/SocketManager/README.md @@ -0,0 +1,19 @@ +# SocketManager + +SocketManager + +## Data + +Describe any data formats SocketManager creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for SocketManager. 
+ +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/SocketManager/SocketManager.cpp b/UserTools/SocketManager/SocketManager.cpp new file mode 100644 index 0000000..4ead4f4 --- /dev/null +++ b/UserTools/SocketManager/SocketManager.cpp @@ -0,0 +1,107 @@ +#include "SocketManager.h" + +SocketManager::SocketManager():Tool(){} + + +bool SocketManager::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + int update_ms=2000; + m_variables.Get("verbose",update_ms); + + thread_args.m_data = m_data; + thread_args.update_period_ms = std::chrono::milliseconds{update_ms}; + thread_args.last_update = std::chrononow(); + m_data->utils.CreateThread("socket_manager", &Thread, &thread_args); + m_data->num_threads++; + + return true; +} + + +bool SocketManager::Execute(){ + + if(!thread_args->running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++m_data->socket_manager_thread_crashes; + } + + return true; +} + + +bool SocketManager::Finalise(){ + + // signal job distributor thread to stop + Log(m_tool_name+": Joining socket manager thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + return true; +} + +void SocketManager::Thread(Thread_args* args){ + + SocketManager_args* m_args = dynamic_cast(args); + + m_args->last_update = std::chrononow(); + m_args->m_data->Log("checking for new clients",22); //FIXME + + bool new_clients=false; + + std::unique_lock container_locker(managed_sockets_mtx); + for(SocketConnection* sock : m_data->managed_sockets){ + + std::unique_lock locker(sock->socket_mtx); + int new_conn_count = (sock->connections.size() - m_args->m_util->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->port_name)); + locker.unlock(); + + if(new_conn_count!=0){ + new_clients = true; + m_args->m_data->services->SendLog(m_tool_name+": "+std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); + + // update the list of clients so they can be queried + for(std::pair& aservice : sock->connections){ + if(!m_args->clientsmap.count(aservice.first)){ + m_args->clientsmap.emplace(aservice.first,sock->service_name); + } else { + m_args->clientsmap.at(aservice.first)+= ", "+sock->service_name; + } + } + + } + + } + container_locker.unlock(); + + if(new_clients){ + + std::string clientlist; + for(std::pair& aclient : m_args->clientsmap){ + if(!clientlist.empty()) clientlist+="\n"; + clientlist += aclient.first+": "+aclient.second; + } + if(clientlist.size()>0){ + // if client list is non-empty, remove trailing newline and set as slow control indicator + clientlist.pop_back(); + m_args->m_data->sc_vars["Clients"]->SetValue(clientlist); + } + + } + + std::this_thread::sleep_until(m_args->last_update+m_args->update_period_ms); + + return; + +} + + diff --git 
a/UserTools/SocketManager/SocketManager.h b/UserTools/SocketManager/SocketManager.h new file mode 100644 index 0000000..83dab3b --- /dev/null +++ b/UserTools/SocketManager/SocketManager.h @@ -0,0 +1,44 @@ +#ifndef SocketManager_H +#define SocketManager_H + +#include + +#include "Tool.h" +#include "DataModel.h" + +/** +* \class SocketManager +* +* This Tool uses the DAQUtils class to periodically find new clients advertising relevant services and make new connections to their respective zmq sockets. +* +* $Author: Marcus O'Flaherty $ +* $Date: 2025/12/11 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +struct SocketManager_args : public Thread_args { + + DataModel* m_data; + std::map clientsmap; + + std::map> last_update; + std::chrono::milliseconds update_period_ms; + +}; + +class SocketManager: public Tool { + + public: + SocketManager(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose. + bool Finalise(); ///< Finalise function used to clean up resources. 
+ + private: + static void Thread(Thread_args* args); + SocketManager_args thread_args; + +}; + + +#endif diff --git a/UserTools/Unity.h b/UserTools/Unity.h index a38d466..a51bdce 100644 --- a/UserTools/Unity.h +++ b/UserTools/Unity.h @@ -1 +1,13 @@ #include +#include "MulticastReceiver.h" +#include "MulticastWorkers.h" +#include "DatabaseWorkers.h" +#include "QueueTrimmer.h" +#include "WriteQueryReceiver.h" +#include "ReadQueryReceiverReplySender.h" +#include "WriteWorkers.h" +#include "MiddlemanNegotiate.h" +#include "Monitoring.h" +#include "SocketManager.h" +#include "ResultWorkers.h" +#include "JobManager.h" diff --git a/UserTools/WriteQueryReceiver/README.md b/UserTools/WriteQueryReceiver/README.md new file mode 100644 index 0000000..c36a581 --- /dev/null +++ b/UserTools/WriteQueryReceiver/README.md @@ -0,0 +1,19 @@ +# PubReceiver + +PubReceiver + +## Data + +Describe any data formats PubReceiver creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for PubReceiver. + +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp new file mode 100644 index 0000000..5fc7b7d --- /dev/null +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp @@ -0,0 +1,244 @@ +#include "WriteQueryReceiver.h" + +WriteQueryReceiver::WriteQueryReceiver():Tool(){} + + +bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + /* ----------------------------------------- */ + /* Configuration */ + /* ----------------------------------------- */ + + bool am_master = true; // FIXME not sure being used any more + m_verbose=1; + port_name = "db_write"; + // FIXME do these timeouts need to be << transfer_period_ms? 
+ int rcv_timeout_ms = 500; + int poll_timeout_ms = 500; + int transfer_period_ms = 200; + int local_buffer_size = 200; + + m_variables.Get("verbose",m_verbose); + m_variables.Get("rcv_timeout_ms",rcv_timeout_ms); + m_variables.Get("poll_timeout_ms",poll_timeout_ms); + m_variables.Get("port_name", port_name); + m_variables.Get("am_master", am_master); + m_variables.Get("local_buffer_size", local_buffer_size); + m_variables.Get("transfer_period_ms", transfer_period_ms); + + /* ----------------------------------------- */ + /* Socket Setup */ + /* ----------------------------------------- */ + + // Write queries are received via a SUB socket so they get to both middlemen - only the master runs the query. + // acknowledgements and any 'returning' results are sent on the ROUTER socket used for receiving read queries + + // socket to receive published write queries from clients + // ------------------------------------------------------- + ManagedSocket* managed_socket = new ManagedSocket; + managed_socket->service_name=""; // attach to any client type... + managed_socket->port_name = port_name; // ...that advertises a service on port 'port_name' + managed_socket->socket = new zmq::socket_t(*m_data->context, ZMQ_SUB); + // this socket never sends, so a send timeout is irrelevant. + managed_socket->socket->setsockopt(ZMQ_RCVTIMEO, rcv_timeout_ms); + // don't linger too long, it looks like the program crashed. + managed_socket->socket->setsockopt(ZMQ_LINGER, 10); + managed_socket->socket->setsockopt(ZMQ_SUBSCRIBE,"",0); + managed_socket->socket->setsockopt(ZMQ_RCVHWM,10000); // TODO are these sufficient? + managed_socket->socket->setsockopt(ZMQ_BACKLOG,1000); // TODO any other options? 
+ + // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients + std::unique_lock locker(m_data->managed_sockets_mtx); + m_data->managed_sockets[port_name] = managed_socket; + locker.unlock(); + + /* ----------------------------------------- */ + /* Thread Setup */ + /* ----------------------------------------- */ + + thread_args.m_data = m_data; + thread_args.socket = managed_socket->socket; + thread_args.socket_mtx = managed_socket->socket_mtx; // for sharing socket with SocketManager + thread_args.poll_timeout_ms = poll_timeout_ms; + thread_args.poll = zmq::pollitem_t{NULL, socket, ZMQ_POLLIN, 0}; + thread_args.in_local_queue = m_data->querybatch_pool.GetNew(); + thread_args.in_local_queue.reserve(local_buffer_size); + thread_args.local_buffer_size = local_buffer_size; + thread_args.transfer_period_ms = transfer_period_ms; + thread_args.make_new = true; + m_data->utils.CreateThread("write_query_receiver", &Thread, &thread_args); // thread needs a unique name + m_data->num_threads++; + + return true; +} + +// FIXME renoame to writequeryreceiver +bool WriteQueryReceiver::Execute(){ + + if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++m_data->pub_rcv_thread_crashes; + } + + /* + FIXME are we doing this + if(m_data->am_master != am_master_last){ + if(m_data->am_master) Promote(); + else Demote(); + } + */ + + return true; +} + + +bool WriteQueryReceiver::Finalise(){ + + // signal background receiver thread to stop + Log(m_tool_name+": Joining receiver thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + if(m_data->managed_sockets.count(port_name)){ + std::unique_lock lock(m_data->managed_sockets_mtx); + ManagedSocket* sock = m_data->managed_sockets[port_name]; + m_data->managed_sockets.erase(port_name); + locker.unlock(); + if(sock->socket) delete sock->socket; // destructor closes socket + delete sock; + } + + return true; +} + +void WriteQueryReceiver::Thread(Thread_args* args){ + + WriteQueryReceiver_args* m_args = reinterpret_cast(args); + + // transfer to datamodel + // ===================== + if(m_args->in_local_queue->size() >= m_args->local_buffer_size || + (m_args->last_transfer - std::chrononow()) > transfer_period_ms){ + + if(!make_new) pop_back(); + if(!m_args->in_local_queue->empty()){ + + std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); + m_args->m_data->write_msg_queue.push_back(m_args->in_local_queue); + locker.unlock(); + + m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(); + m_args->in_local_queue.reserve(m_args->local_buffer_size); + + m_args->m_data->Log(m_tool_name+": added "+std::to_string(next_index) + +" messages to datamodel",5); // FIXME better logging + m_args->last_transfer = std::chrononow(); + m_args->make_new=true; + ++(*m_args->m_data->write_buffer_transfers); + + } + + } + + // poll + // ==== + try { + std::unique_lock lock(m_args->socket_mtx); + m_args->get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); + } catch(zmq::error_t& err){ + // ignore poll aborting due to signals + if(zmq_errno()==EINTR) return; + std::cerr<running=false; // 
FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->write_polls_failed); + return; + } // FIXME catch non-zmq errors? can we handle them any better? + catch(...){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->write_polls_failed); + return; + } + if(m_args->get_ok<0){ + std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + ++(*m_args->m_data->write_polls_failed); + return; + } + + // read + // ==== + if(m_args->poll.revents & ZMQ_POLLIN){ + m_data->Log(m_tool_name+": got a write query from client",v_debug); + + if(m_args->make_new){ + m_args->in_local_queue->emplace_back(); // FIXME we could resize(local_buffer_size) on retreive new + m_args->make_new = false; // then resize down to actual size on transfer out + } + ZmqQuery& msg_buf = m_args->in_local_queue->back().queries; + msg_buf.resize(4); + // received parts are [topic, client, msg_id, query] + // reorder parts on receipt as client and msg_id will be left untouched and re-used for response + static constexpr char part_order[4] = {2,0,1,3}; + + std::unique_lock locker(m_args->socket_mtx); + for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ + m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); + if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; + } + + // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message + if(m_args->get_ok && msg_buf[3].more()){ + while(true){ + m_args->socket->recv(&m_args->msg_discard); + ++m_args->msg_parts; + } + } + locker.unlock(); + + // if the read failed, discard the message + if(!m_args->get_ok){ + std::cerr<msg_parts<m_data->write_rcv_fails); + return; + } + + // if there weren't 4 parts, discard the message + if(m_args->msg_parts!=4){ + std::cerr<msg_parts<<" part 
message"<m_data->write_bad_msgs); + return; + } + + // else success + make_new=true; + ++(*m_args->m_data->write_msgs_rcvd); + + } // else no messages from clients + + return; +} + +/* +bool WriteQueryReceiver::Promote(){ + // FIXME TODO if using a standby, need to connect to clients +} + +bool WriteQueryReceiver::Demote(){ + // FIXME TODO if using a standby, need to disconnect from clients + // (to prevent zmq buffering messages, and avoid load of unnecessarily reading them) +} +*/ diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h new file mode 100644 index 0000000..f068d3e --- /dev/null +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h @@ -0,0 +1,62 @@ +#ifndef WriteQueryReceiver_H +#define WriteQueryReceiver_H + +#include +#include + +#include "Tool.h" +#include "DataModel.h" + +/** +* \class WriteQueryReceiver +* +* This Tool receives Write queries from clients over a ZMQ_SUB socket and pushes them to the DataModel +* +* $Author: M. O'Flaherty $ +* $Date: 2025/11/26 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + + +struct WriteQueryReceiver_args : public Thread_args { + + DataModel* m_data; + zmq::socket_t* socket=nullptr; + std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients + + int poll_timeout_ms; + zmq::pollitem_t poll; + zmq::message_t msg_discard; + bool make_new; + int msg_parts; + int get_ok; + QueryBatch* in_local_queue; + + std::chrono::time_point last_transfer; + std::chrono::milliseconds transfer_period_ms; + size_t local_buffer_size; + +}; + +class WriteQueryReceiver: public Tool { + + public: + WriteQueryReceiver(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. 
+ bool Execute(); ///< Executre function used to perform Tool perpose. + bool Finalise(); ///< Finalise funciton used to clean up resorces + + private: + static void Thread(Thread_args* args); + WriteQueryReceiver_args thread_args; + + std::string port_name; // name by which clients advertise sockets for sending write queries to the DB + + bool am_master; + //bool Promote(); ///< Connect to clients to start receiving messages, if we became master + //bool Demote(); ///< Disconnect from clients to stop receiving & processing messages, if we are no longer master + + +}; + +#endif diff --git a/UserTools/WriteWorkers/README.md b/UserTools/WriteWorkers/README.md new file mode 100644 index 0000000..d5890a0 --- /dev/null +++ b/UserTools/WriteWorkers/README.md @@ -0,0 +1,19 @@ +# ReadWriteWorkers + +ReadWriteWorkers + +## Data + +Describe any data formats ReadWriteWorkers creates, destroys, changes, analyzes, or its usage. + + + + +## Configuration + +Describe any configuration variables for ReadWriteWorkers. + +``` +param1 value1 +param2 value2 +``` diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp new file mode 100644 index 0000000..aafbc1c --- /dev/null +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -0,0 +1,195 @@ +#include "WriteWorkers.h" + +WriteWorkers::WriteWorkers():Tool(){} + + +bool WriteWorkers::Initialise(std::string configfile, DataModel &data){ + + if(configfile!="") m_variables.Initialise(configfile); + //m_variables.Print(); + + m_data= &data; + m_log= m_data->Log; + + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + + thread_args.m_data = m_data; + m_data->utils.CreateThread("write_job_distributor", &Thread, &thread_args); + m_data->num_threads++; + + return true; +} + + +bool WriteWorkers::Execute(){ + + // FIXME ok but actually this kills all our jobs, not just our job distributor + // so we don't want to do that. 
+ if(!thread_args.running){ + Log(m_tool_name+" Execute found thread not running!",v_error); + Finalise(); + Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++m_data->write_job_distributor_thread_crashes; + } + + return true; +} + + +bool WriteWorkers::Finalise(){ + + // signal job distributor thread to stop + Log(m_tool_name+": Joining job distributor thread",v_warning); + m_data->utils.KillThread(&thread_args); + Log(m_tool_name+": Finished",v_warning); + m_data->num_threads--; + + return true; +} + + +void WriteWorkers::Thread(Thread_args* args){ + + WriteJobDistributor_args* m_args = reinterpret_cast(args); + + // grab a batch of write queries + std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); + if(!m_args->m_data->write_msg_queue.empty()){ + std::swap(m_args->m_data->write_msg_queue, m_args->local_msg_queue); + } + locker.unlock(); + + // add a job for each batch to the queue + for(int i=0; ilocal_msg_queue.size(); ++i){ + + // add a new Job to the job queue to process this data + Job* the_job = m_args->m_data->job_pool.GetNew(&m_args->m_data->job_pool, "write_worker"); + if(the_job->data == nullptr){ + // on first creation of the job, make it a JobStruct to encapsulate its data + // N.B. 
Pool::GetNew will only invoke the constructor if this is a new instance, + // (not if it's been used before and then returned to the pool) + // so don't pass job-specific variables to the constructor + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); + } else { + // this should never happen as jobs should return their args to the pool + std::cerr<<"WriteWorker Job with non-null data pointer!"<(the_job->data); + job_data->local_msg_queue = m_args->local_msg_queue[i]; + + the_job->func = WriteMessageJob; + the_job->fail_func = WriteMessageFail; + + /*ok =*/ m_args->m_data->job_queue.AddJob(the_job); // just checks if you've defined func and first_vals = true; + + } + + return true; +} + +// ««-------------- ≪ °◇◆◇° ≫ --------------»» + +void WriteWorkers::WriteMessageFail(void*& arg){ + + // safety check in case the job somehow fails after returning its args to the pool + if(arg==nullptr){ + return; // FIXME log this occurrence? + } + + // FIXME do something here + // if there were preceding messages that were succesfully added + // we could try to insert the current buffers so that those get processed. + // but we don't know where we failed, so that could be risky if the buffer is corrupt? + // we could keep track of where we were in m_args and: + // 1. log the specific message we were trying to process when the job failed + // 2. submit the data we already have + // 3. 
make a new job for the remaining data + // this probably seems better, but be careful not to get stuck in a fail loop + // if the problem isn't the query + + // at minimum we need to pass our vector back somewhere for the failures + // to be reported to the clients + //m_args->m_data->query_buffer_pool.Add(m_args->msg_buffer); << FIXME not back to the pool but reply queue + + WriteJobStruct* m_args=reinterpret_cast(arg); + ++(*m_args->m_data->write_worker_job_fails); + + // return our job args to the pool + m_args->m_pool.Add(m_args); + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + +} + +void WriteWorkers::WriteMessageJob(void*& arg){ + + WriteJobStruct* m_args = reinterpret_cast(arg); + + m_args->local_msg_queue.reset(); + + // pull next query from batch + for(size_t i=0; ilocal_msg_queue->queries.size(); ++i){ + + ZmqQuery& query = m_args->local_msg_queue->queries[i]; + + // we can only batch queries destined for the same table, + // so we need to split our messages up into different queues + // (this also means we can prioritise high priority queries such as alarms) + // we can do batch insertions with a 'returning version' statement to obtain + // a multi-record response with all the corresponding version numbers: e.g. 
+ // INSERT INTO rootplots ( time, name, data ) SELECT * FROM jsonb_to_recordset + // ('[ {"time":"2025-12-05 23:31", "name":"dev1", "data":{"message":"blah"} }, + // {"time":"2025-12-05 23:25", "name":"dev2", "data":{"message":"argg"} } ]') + // as t(time timestamptz, name text, data jsonb) returning version;" + // as before, such batches need to be grouped according to destination table + switch(query_topic{query.topic()[2]}){ + // FIXME switch letter to query_type enum class + case query_topic::alarm: // ALARM + // alarm insertions require no return value, + // but we still need to send back an acknowledgement once the alarm is inserted + m_args->out_buffer = m_args->local_msg_queue->alarm_buffer; + break; + case query_topic::dev_config: // DEVCONFIG + m_args->out_buffer = m_args->local_msg_queue->devconfig_buffer; + break; + case query_topic::run_config: // RUNCONFIG + m_args->out_buffer = m_args->local_msg_queue->runconfig_buffer; + break; + case query_topic::calibration: // CALIBRATION + m_args->out_buffer = m_args->local_msg_queue->calibration_buffer; + break; + case query_topic::plotlyplot: // PLOTLYPLOT + m_args->out_buffer = m_args->local_msg_queue->plotlyplot_buffer; + break; + case query_topic::rootplot: // TROOTPLOT (yeah the T is just so it's unique...) FIXME maybe topic can just be a unique char + m_args->out_buffer = m_args->local_msg_queue->rooplot_buffer; + break; + default: + // FIXME unrecognised topic log it. 
+ } + + if(i!=0) m_args->out_buffer += ", "; + m_args->out_buffer += query.msg(); + + } + + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers + std::unique_lock locker(m_args->m_data->write_query_queue_mtx); + m_args->m_data->alarm_batch_queue.push_back(m_args->write_query_queue); + locker.unlock(); + + ++(*m_args->m_data->write_worker_job_successes); + + // return our job args to the pool + m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args = nullptr; // clear the local m_args variable... not strictly necessary + arg = nullptr; // clear the job 'data' member variable + + return; +} + + diff --git a/UserTools/WriteWorkers/WriteWorkers.h b/UserTools/WriteWorkers/WriteWorkers.h new file mode 100644 index 0000000..3761da0 --- /dev/null +++ b/UserTools/WriteWorkers/WriteWorkers.h @@ -0,0 +1,56 @@ +#ifndef WriteWorkers_H +#define WriteWorkers_H + +#include + +#include "Tool.h" +#include "DataModel.h" + +/** +* \class WriteWorkers +* +* This Tool uses a worker pool to process write queries, converting received messages (structs encapsulating batches of zmq::message_t) into a format suitable for the DatabaseWorkers (array of JSONs). +* +* $Author: M. O'Flaherty $ +* $Date: 2025/12/04 $ +* Contact: marcus.o-flaherty@warwick.ac.uk +*/ + +// class for things passed to multicast worker threads +struct WriteJobStruct { + + WriteJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; + DataModel* m_data; + Pool* m_pool; + QueryBatch* local_msg_queue; + +}; + +struct WriteJobDistributor_args : Thread_args { + + DataModel* m_data; + std::vector*> local_msg_queue; // swap with datamodel and then pass out to jobs + // maybe we can use shared_ptr instead of a job args pool? - only useful for jobs retaining their args, + // i.e. job queues of a single type of job. 
+ Pool job_struct_pool(true, 1000, 100); ///< pool for job args structs // FIXME default args + +}; + +class WriteWorkers: public Tool { + + public: + WriteWorkers(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose. + bool Finalise(); ///< Finalise function used to clean up resources. + + private: + static void Thread(Thread_args* args); + WriteJobDistributor_args thread_args; ///< args for the child thread that makes jobs for the job queue + + static void WriteMessageFail(void*& arg); + static void WriteMessageJob(void*& arg); + +}; + +#endif From 2af2e8f13fe7c99a05b5068789c68b05740944ec Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Fri, 12 Dec 2025 17:31:30 +0000 Subject: [PATCH 02/12] mostly updates from having used old ToolDAQApplication template. 
Some DataModel changes --- .gitignore | 31 +++++++++ DataModel/DataModel.cpp | 4 +- DataModel/DataModel.h | 25 ++----- UserTools/DummyTool/DummyTool.cpp | 63 ++++++++++++----- UserTools/DummyTool/DummyTool.h | 4 +- UserTools/DummyTool/DummyTool.o | Bin 0 -> 52112 bytes UserTools/template/MyTool.cpp | 11 +-- UserTools/template/MyTool.h | 4 +- UserTools/template/MyTool.o | Bin 0 -> 19720 bytes .../template/MyToolDynamicMultiThread.cpp | 32 ++++----- UserTools/template/MyToolDynamicMultiThread.h | 8 +-- UserTools/template/MyToolDynamicMultiThread.o | Bin 0 -> 46912 bytes UserTools/template/MyToolMultiThread.cpp | 30 ++++---- UserTools/template/MyToolMultiThread.h | 5 +- UserTools/template/MyToolMultiThread.o | Bin 0 -> 50032 bytes UserTools/template/MyToolServiceAdd.cpp | 11 +-- UserTools/template/MyToolServiceAdd.h | 4 +- UserTools/template/MyToolServiceAdd.o | Bin 0 -> 39104 bytes UserTools/template/MyToolThread.cpp | 11 +-- UserTools/template/MyToolThread.h | 3 +- UserTools/template/MyToolThread.o | Bin 0 -> 37352 bytes UserTools/template/MyToolZMQMultiThread.cpp | 24 ++++--- UserTools/template/MyToolZMQMultiThread.h | 7 +- UserTools/template/MyToolZMQMultiThread.o | Bin 0 -> 69840 bytes UserTools/template/README.md | 20 +----- configfiles/Dummy/ToolChainConfig | 65 +++++++++++++----- configfiles/template/ToolChainConfig | 64 ++++++++++++----- src/main.cpp | 11 ++- 28 files changed, 267 insertions(+), 170 deletions(-) create mode 100644 .gitignore create mode 100644 UserTools/DummyTool/DummyTool.o create mode 100644 UserTools/template/MyTool.o create mode 100644 UserTools/template/MyToolDynamicMultiThread.o create mode 100644 UserTools/template/MyToolMultiThread.o create mode 100644 UserTools/template/MyToolServiceAdd.o create mode 100644 UserTools/template/MyToolThread.o create mode 100644 UserTools/template/MyToolZMQMultiThread.o diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3ccff8f --- /dev/null +++ b/.gitignore @@ -0,0 +1,31 @@ +# 
backups +*~ +\#*# +# thumbnails +.DS_Store +# objects +*.o +# libraries +*.so +*.a +# dependency files +*.d +# root dictionary pcm files +*.pcm +# root dictionary sourcefiles +*Dict.cxx +*Dict.h +# python pre-compiled modules +*.pyc +# data files +*.root + +# the dependencies symlink +Dependencies +# the main executable +main +# lib folder is just build products +lib/* +# the include folder is actually automatically populated +include/* + diff --git a/DataModel/DataModel.cpp b/DataModel/DataModel.cpp index 98b11f4..e1267cf 100644 --- a/DataModel/DataModel.cpp +++ b/DataModel/DataModel.cpp @@ -1,6 +1,6 @@ #include "DataModel.h" -DataModel::DataModel(){} +DataModel::DataModel():DAQDataModelBase(){} /* TTree* DataModel::GetTTree(std::string name){ @@ -17,7 +17,7 @@ void DataModel::AddTTree(std::string name,TTree *tree){ } -void DataModel::DeleteTTree(std::string name){ +void DataModel::DeleteTTree(std::string name,TTree *tree){ m_trees.erase(name); diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index 06df1f9..ef85719 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -6,7 +6,6 @@ #include #include "DAQDataModelBase.h" -#include "Utilities.h" #include "Pool.h" #include "JobQueue.h" @@ -31,7 +30,7 @@ class DataModel : public DAQDataModelBase { private: - Utilities utils; ///< for thread management + DAQUtilities utils; ///< for thread management // Tools can add connections to this and the SocketManager // will periodically invoke UpdateConnections to connect clients @@ -101,7 +100,6 @@ class DataModel : public DAQDataModelBase { /* PubReceiver */ /* ----------------------------------------- */ // TODO Tool monitoring struct? 
- std::atomic pub_rcv_thread_crashes; std::vector write_msg_queue; std::mutex write_msg_queue_mtx; std::atomic write_polls_failed; @@ -109,22 +107,18 @@ class DataModel : public DAQDataModelBase { std::atomic write_rcv_fails; std::atomic write_bad_msgs; std::atomic write_buffer_transfers; - + std::atomic pub_rcv_thread_crashes; //} - // TODO move to struct - { - zmq::socket_t* pub_socket; - std::mutex pub_socket_mtx; // socket needed by Reader and SocketManager (for finding new clients) - } - /* ----------------------------------------- */ /* ReadReply */ /* ----------------------------------------- */ // TODO Tool monitoring struct? - std::atomic read_rcv_thread_crashes; std::vector read_msg_queue; std::mutex read_msg_queue_mtx; + std::vector query_replies; + std::mutex query_replies_mtx; + std::atomic readrep_polls_failed; std::atomic readrep_msgs_rcvd; std::atomic readrep_rcv_fails; @@ -133,15 +127,8 @@ class DataModel : public DAQDataModelBase { std::atomic readrep_rep_send_fails; std::atomic readrep_in_buffer_transfers; std::atomic readrep_out_buffer_transfers; + std::atomic read_rcv_thread_crashes; - // TODO move to struct - { - zmq::socket_t* read_socket; - std::mutex read_socket_mtx; // socket needed by Reader and SocketManager (for finding new clients) - } - - std::vector query_replies; - std::mutex query_replies_mtx; /* ----------------------------------------- */ /* MulticastWorkers */ diff --git a/UserTools/DummyTool/DummyTool.cpp b/UserTools/DummyTool/DummyTool.cpp index b99ff88..b7ccdee 100644 --- a/UserTools/DummyTool/DummyTool.cpp +++ b/UserTools/DummyTool/DummyTool.cpp @@ -5,37 +5,68 @@ DummyTool::DummyTool():Tool(){} bool DummyTool::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); - //m_variables.Print(); + InitialiseTool(data); + InitialiseConfiguration(configfile); - m_data= &data; - m_log= m_data->Log; + //m_variables.Print(); - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + 
+ if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; - Log("test 1",1,m_verbose); + Log("test 1",1); + + ExportConfiguration(); return true; } bool DummyTool::Execute(){ + + // example of print out methods + // mesage level indicates the minimum verbosity level to print out a message + // Therefore a message level of 0 is always printed so should be used for high priority messages e.g. errors + // and a message level or 9 would be for minor messgaes rarely printed - // Dummy test of various printout sytles and techniques + Log("test 2a"); // defualt log function message level is 0. + //Note: calls to the Log function are thread safe. + //Note: tool name is appended to log message ustomatically - Log("test 2",1,m_verbose); - Log("test 3",1,5); - *m_log< #include "Tool.h" +#include "DataModel.h" /** * \class DummyTool @@ -13,8 +14,8 @@ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ + class DummyTool: public Tool { @@ -28,6 +29,7 @@ class DummyTool: public Tool { private: + }; diff --git a/UserTools/DummyTool/DummyTool.o b/UserTools/DummyTool/DummyTool.o new file mode 100644 index 0000000000000000000000000000000000000000..a1ad179196b6b1f1b9f0b3d9bade20f9fb150391 GIT binary patch literal 52112 zcmd6Q34B!5_5VwTfryw1iZv?A0E2=`%mk3cHIW3~=tNT%cRLBmK%&`9CJ-tJn29nD zQC#|yR@xu7xU{t`wbborH42K_4Q*|yb)nXU5i7V9clmwKU1rX_nJWSP{rkTkUfz4( zckVgoo_p@O+ndMIs^Ik8oE%H19P4x|b*E9wD)^t&b-B1Kx6ZUG1nt4Nr#y$?7=hzZ z9N)lk7>+_5^z0}cN8=cU!;6Eku{eC{dK}KjsOw{K9m6<)**WfF+`)Ue;u?AnU9nZ7Np7liYKyAl@1lR?1 zQI3neC(8>2T54}D&m$e|SV6k5QV4TBznwo#k| zj{8;finM;Zy`iziKDC#>y&Bnq@%gp!HZ(M7O)giAhE)z`gM3sC@ah@x_np)PONA6P zL8)4*>7dn>dZG)^M6N)cTj7*Jyd>*wU9A7rP`w`hP zTLK{$=q4arp`+nl0EvS=*F+Hzsn9hK4I$&nNCAJKQWq-YukHA*}H zYKV#yO`kOAX6=xa=>3-`_6j01HOF@Ql*PUR)&gq?$YTI}Qgd7?Dw#nb_jTwXKQ40SX2^!^iN)Tl`e_y?Jn zibffXePOGZYsag|?Y9Tx8^+7#xZ4v=pku07c2y}_L7)Jao<|l;OJ-2M?UV}>CEH?l zqpTj7w$d8Igl(n>6D7NcFSo|y(AudL`jRxGYiq|l8+Czn7HQV>yWW0J@6-Fhc0ksR 
zqw81JJ<_uu*0tkbG3fWJU8UPzxS`zlgTr^ z6WB;}DCwuFBZLO`;5%%;b2(P_* zY6fEkYJa#2!?XFr$gMWd=%-+*JPQM}9yKt^9*nmLUNWF+W?!dOU&#NOtvUw`l~ubi zR@-Oi$`&=%2x_kpGX~lF9Wh^_%-M!cStI7hXgFUXV$Opw3TFe2nAA>5+<+pcZY6pE z5;-mL+k-IANRD6A2~rb|bevpyCFE*Ux?gX9SHB!2p^v6{hZyMDaWNXVW4M>&CKdi? zV+@DMM!Rn1sh$@TjVDwqkVBj5_y+ebG>z(B3~hc&fk`&b>+_TwB}cx=p$!dYFsLg` zn{EVP*Hp9G-%jK5ES<(v8P*W8(mIW{fpr=S5pmKDn>q5-gP{HCGz#{GzcE_gqsE&5 z@BE0;TMVlUC(bn>}LA$->SUF{$Lq zIm^HX^Y0vxsqWXw|Jr@wk6PYW{k{9be?RQ=AeyVQI*s;8n==l~{xkQ5zr(Y&UZ$bk zPUxd)_S)O-hwWGF3xD|WzSiyi`@(-e#$Yx3!rx*0)C`tIm18NC$f$BIR<8Crs@U<1 zYtvTGsMER#Ow*Rh#1a{w(0168=}{t zC!3wV@iApkb~*qzUtZqxK4km@xFP;;s*%nhU#3CxMLoPJ4Vr_SRXPME2XLn*Zo$Yj zxjvfy{L98~FJLySsU73DV0;0M)ncu*YE0guSGs7?OE(Z3OZ`~}aLFzc%Y1SWmulS) zt3e!=ivv(vEEjvCf0NF=FvGXS2r3v`NFz2Ka(O48D0n-$UaU)!w9DiK70_X?FSEx+ zsG+$y>_KIw@hq%CO8~T-PD9)A`!JqOTR0EIqhjHln0Rf(j_4HdUDi!a{Zk0=Jkp-A zelCZu7}1XZk`ZmhU1NaOE2>*c>C`2S9x0lY_h%eMEiRh-JJ>$S4fP!%#fx z+X4_#6`L?o-&R~x3idT(H8tv^o#rQHK2!~DF-3*=U~HGt3sdMl1yZk7$SCz{ph=<7 z>nAju^V~Ps?pq@=+ObP~1<;Wg+hW~%o$n*$qq%zlI=`fU zZj0Slg{s3ccl7U`=nBCHYvJ*GM4^K5b)rBirI|6$%TPLmr4p(19azNB#>}dy)DmH% zI88OCSg`LNQGt#fZ5r9#kE*g!7*Rs(Bo!uik{*^>*zQs=zNC~)x0Z_5S5^E_TPH4J zYkXvD)ED6k!X0zx#cuJf!!_#aPMpG2m#cAsVKiZP;e>1mf?{{IEsyS+edX1$FW|B} zhHHPVjxQ+0sz{_H5N{nHh+pNkdp;lHx%Ss!V{h)qh-8lqN9=ta_r3ky&sN7iK#l_B z==y^l2ftuhgU{=UdcdDn8NAi;2H$wt6x(+JxP)9^h`l0=Mjh3|f{|y%%Qy!?>)nwr z;(m2}DfHMADXESxgdR)1fu1j*$L~{m(ArJ!=Se+2Lk{S%r|Si!-PG_hUEe5LKf4N9UzkiV;==` zc2OI}NW_K{1!Egj8~p-p^ls5cv9-{9A4~wZ8d9-B?8_iOje5$(9(i>P`ogV>E#h87 zT~aY|FArcN*M=5W1$J?2WK0Us=J{Xj-se2go50;Jo9**P4iH`ck8uAe+7>)4NUFUT zjJ+WWWcO6nBj|7<(((HgvNU z!7|vl!PgGGFoL)HY?N7`6dj+Y<;Gfyq9Q0hl|%dVa^N-3v$LNpQQ+SljE^HDsC=H` zQQrj!qjYY>xtfTGMF6EPBaCE2uT4Cl4ur~3T6$!;PVxVk|=RfA`UK0|~nNUs5sM>-Uf>hnBpC<#49eU=NR zc;7=wlm_rjSSfk-^cQ+2M12KNRfj8GR={3@d-@xRq*#$qsVGF3i+iyR-Y#oeEs^%E z^wr{7DhiS^qO)4*t5HnoEe<;_`v9^UZYkNZ$4Kq&V9r03U)>KMAg@?~r|<>%^;R?< 
z=@mbqW{Hwx{0>L-+(&&7Nmd1*Y!~3x|1?}<491dIVO$vOTjwL;{^v*~%v)@0N!NS7OqT!+NF7;qGU>-sC>K$Lyix}K$9m=xJ9}hj8;xYLp0G{B0XI> z$byh`N7Xr`Fy^mXCh4VQs<>75ic+zv_cc%Nx8eJh@o=Mv0qAM%_@%Yv_YY8oBcYq< zr!%Rtp3d) z>9w&7m2eR*)EgvJOY}Au38(Pkc!`9}e#3B-kUUJ}OT8vCVE@&xWh~!B<3_{!OEpz6 zb*7?n6N7fEd7J9-UKV~qV~^<{p$qS;`uIU;yUZ?o%F|a$$b;zFm*Ut%(Dl2qmXH_; ze3c!0%Z{aT=c(L@J)rQ6Dz_>gflRAbc=0kl=>cFFvp`whKaIV}Nb0 zYJ{<@iihcMq8qa_%AcWM5oPb2&h+z=%yj);CjABh$9(WTuHP;^pxlW!wSJFE{XPsV zr_w>cPe{P>J*1y!C6%W#_EF*^L5_K-u;aIA7Bu12!4n%Knuu*Wb1I-*nN46d8twMcFH8CQI zt9FN0A6@+!tpdJkr$*JiKSBMWO(vG$k>n%%x?Myf^w5cV_`MuwQ{1>+;25|h{5o#f z@TV1IwLaN3`k#|#Lk)|bQdYX408|U5Y4q0(Jp>(jlz0d;6Mf=6)gFne!2evbXV6`Z z!HH7-ndU^-(OPOs{}AFLHO$0??`d3*60)unpD)&l)VlInxJm6-3p)+g{N^$HVYcy_ zC}yS4PE^6bLVkseFSL)m0hJ@dQF3f|7`Ds2@_YT-CUk;HdCHO42nG?@o$e0mVF$IePVm2Ka1&15qP;Q4@k><`(vBC#@Ml^{Ck2EKC(}f!MqThYGz><2Gz;uSbjdQKI#!Jo$21&NkVNC?7H;t^cw zH8CzsJbZ%~eQhgbo4?YE8_aISc5S3nGO_oSd)^*z$H+TrR@|#X41B8`?@%C`((M0m zfr`)y5CKS_>qYa!T`=BFivXBY0|~^jUPp^y@1hlA1E(p=yKhszN}R%vaO|;um6&w>dIY_=|qz#iL?^ z7s+f?N~$CEvT^U~KWHvNB zG7x(?@o$Q8il5v+!=tX(15h&nH3MmC0tG*zW<>Lon^ub7ONrHtU-A+0L);Ln7lAlN zOk_D(a@nZ+Xwiep{ySt>ioU5!=ZR7?3&dVbOvG8g!vl$)-W~KHz7(8iK-EwhUn~su z%@9RDLUELwN!3FGu~!mLV4$ft4XDsN!~ofjcbD7o3uwilCZ%SOnwVH@Qandy@H~vh zf_aHc9b6OC{g{}Z>f=0*%;5PAJ2ujn_y8@NEBY2GisoWcw16`;C|ZzM;ozFEdp3<{ zKNe9lbXpJZQETB?&WJB<*Q?&iI<(`Lm!kbeE`oEb2B5rLEF&|zq?boV%i2{Qjs_bU z8H~NY<2R}PR#!Bhb-gCXMjH!l%v!VseP9z)e@q$+_uiJ_cTQ_Nj;o2R}xK@ zrz!Gul6rd5T&pA8aDvzBj5IYjd*$`~j&QgYSZkZNzOA{fs{_~VT^;Ss;S^*^K7JY4 z(6sdLj*>AAWisE`Tc~aAWv_ykG{lgy z>=jS%>FB}|^oOr~c%p-V2%986p*X)6!+4_GK_%&5)>qmTNz;lM$v_w65)YAF^)5jo z0Ci}YZLj7@yy#HZ6D`vUB~BF2FxHWpet=t;YtNpX;)?Po23tSQaX&xUTAl0u{b1`k z*T&qJ23uA{A&?;Gw{gXVAeI(Biy2cgE>Al~_N^alhraew^!m&u@L` zx*7A{EqSM3S7P0Ffcq1_b;nTm_e!i^4~t{T;_5?Af2hRTeu(>rCDva@xF0XE{&eVu zoU2Q$w+j34Zgt{u7 zQ1+bY25{YpZalp4M8Mxakz}nqalU2s6%%?>G0}au82bhH6uTk&SH<|<=#Ju{#7Izl zR*ZZ-ej>ZU@4l|Yy3J4JxXVwv|H@A~KI$iZpY((9IX@NkWxxBT66)2f7~~XYDx9{ejPVCf~hjob`OZd&4*@?s4BT&e}A*;%%R`{$TgIan{cdajzU_ 
zJuw1IA1!ph=(9d4bbscvoZam(ia>_QMo-;41}K0gvX9oY)>wBW4}O@035eUhFH+L5YJzsd9v4z&GwB~ zI#gkjvBtLzRL~$!J5_S~o(fQmHU05+4l;>7E4@Kig(b6Uv14Up*P8fDd;$riQJ=pN2K@G0(BEw6#0{20_73yUpyrqOnrAnmTOf0kPhLlp=U>Q&qF|$(D z8!*XTN~+ma#DbhkSK0DS6xP|4Rm=zBT=yh5Unq}f_&w5Esz?;b`iWO;XJU=mY+8mp zEMN8~3dL=dZsmRt?M>UUt%<*5+@{(iEj#tnX1E>Oo*0cmqHd>XS}Tx`max@I3Os@; zK&_F|Lvn+DoUyK{U%sGMNyH=7kP^QavV?&OiJ4rKO*)WV3YL6j_;zfnh(}8!j3aC^|syQ2N;zCiZCw5@z@`*%{ zZj=>KqOK`e=%9P5AFOA1qQ`?kv;^T!^y!CBVGeX2J7kDWe2!Z|uwxV12aR)Mq8pjS z9EI#jiH&!9-o^`j3lSD*gcFy zzY1?fwR)?|KF4}|wP*V4v@CP5*g#75cKGGpC`1~uM@*~jWlt?R#C{06EX9E>;w@L* zg}1+c1m=YpV}Sb3>e#OG=|G(J}5BS|6Ff>&||+B^aLKq zN)+P6E-aWlp89x6^@N>jr*>(B+NE7wj~&__PpI|FKD@pyy6>{~!iZnHryU>Wg9?aY z=%pLIC}%L<1j56qj3_CJnW%&6t3Xuc-@T(pujf?x`xBjb08o6M;oUmPPW-J{pH!>Et!nBuxRpf{uia|Rqe@x>5bK}YO;Dl`|;|RS2 zG79>E^@_uAMm(c8Yc>_8MNVjzl6ckiJMLd}8wl;#3wGJ1g^?{7-K0cee>mOCLcS>aWDEW6Yj*_Khxc9gRn<$R*L>=6IAs1Xy?#YCcK6$ z=L|Zu#YN~u%)Zp_!`Cqo+!dufo#UT0yREHxdPiMLcwt+|<&){IDvd<&-ItTj;Vx{Pp7jx_S!()Kr==ZQ9A+@iWeuGr{Zk7fmTD z_LdZvOfH`4FY}I{8E){}brJPoV#x&g{Vd9y^X-{dPIp1hk;8`MtwM7s05(!_#jsT* z8-I({^?Cx&%rCeh_ssml>s@E&d;12RnLmD|yCT2%n!y$Mr9DF`^J`tVy7EhbROF8b z1t^i@%=|oLos46#;`K8+2l5M6=2qkvUgN6D_x23RT{$$rutHFubs%7QDZkUTAtPU9 zD&MO7!b&AX>!0)OnxH7%klQHdk-CgExbZ1oVU$PMF8RiN`LJ9f}Y$qQ6Kbd7&#M?XdZZWO>Vb%Ms*%k`5JL8%9N_F zfT*uPvc4)vAF};2l?Pw)QFcJZUgKIxV#o$-)${Fe2&3&&HkgsW*6sSM8#c)FC}Df@ z&n>D9wZuDHU8eX|=GWvt25zX`%5?uk2^J&!NagtRiKpNd$l~jj(${NpAIhp(<$<|x zWv#Xu2j-rS2QxDKh~E1a_UTt;c@bY3N!e$HNrxHv{ccz3(5%9eJ~Bd;>3!Ho`ewhI z`zF=f2=TzOj>WZwhIdWwMZyp0)|CDz_`RD7)Qr$&;T7*J$t9vp<^z|7pKWAGE(?ap zl3e0TfawpY`g0jB;*}zCi7y7G)6<qSvPSF6w6hBV!!ueEy=-E^ zYw25@JndHaksQSJrwVt#a$o~KP7iX6n0UB#oFl_fg_k&RkHXJz;3E`1!+{^JaNQ2| z<53DfPXp>aPT`k2@Z$xZX@9?>4?F0mDxAi8Je{U+{D>(nO;h;NG!(-qgx zT%3}qWyJ;HM6o9)Ak_&ndb)I3r#KLK%eI5kp1TlPvn3ZeWlJvE<7PZ^S?&ys0GFks zd-`LS_>o#VCh2wbPk-jJoYs`jT^4?#k|nv4y+`^(mxYbXEXidZl!4NZzsIotT3h7b zzzZ^{(O-ce8-6hGF)Yf6xdQV0fRAQzQn$1-4D{a=cq%GsKJ~y4X5$l@2T)p!^XS}E zoYaPBQ?sCm4g78P43aaN3-#O_Wy~%v++3t_!u_sqB$3(Rs;W^8Ss7s 
zzQ=%%gPd&oUuM9sG~hoq;2#+9AqaWdOQV=NB3MBBx(u^^3eEB5#Sfo+Piu(-OgJblSpXc;IwV#mYf0cfzGs|A=M3|3SmFE|Agy$87Li4-3L+#;? z&Ng!2$fD2!;q$EM18j*?JHnmejs@YW7KiBun7<4yQoem%RdaTIi`5=!>Y&sSN=nGon;Y>1aUJiinV^ z?{HmZs6biNBwKqHnHMOG5k*v&SQe2Vp6mn zGWD!E+&VwfD4)Tw$s)*{b7A57y3Pn3ZBFaLrq+hg%y4H{OPKUYxl$_JNIjc7t4Pd# zHjt!SGbc;g8?WJiY5Sr|n0+3Mo63@`qKr;Lgw0uzli&<>&0*a}_Dk+p%bd{`DM*tE z9bLhcS&_Dm@HE+&XP=j<0-{abDFSD15g7bsRo(4v9g%5mt@E1ZcXiZ7n%Y{0Fhmko z^im$uq(5hKWYy2g>Mljo2_autsXu%6SK>(ZxR*950hy@&aW$ z5GO|-XjSzc;g)b~gm|Q28V!izN-g$vaDtIYb*LkZ;X*yaVv7iCqSYkLs6xE69;Fvd zniY;zar+K1l2U}(KZPjMuSE1EZR{4s`ST)f7oz^yK>*Ii}XPwY1lDV0I#U?AC=esDkCo zk`om<@(9(T$L>sy zYn_W)>#J(&5pO6qHg%xkh`~j6UBBui#-a&kU1)B2ep4$(K9TxHg>;0)h)CVxM}Yb% z4RewKdosWQ8D-lpA?38+Knj?#x1j)&YK*{&kmR$VFG}m9_Gb$ zbDI|U{p3u|h^---T_EnvYZGIlS*hUxi3+6Ort_fTVP{hoHo`cVa)v_-nmQs~bp@8@Lw`~F5~kw!^tMv4u4{J1H-p5ob&lN!^4dJ5Gp9raX9AT(ERn! zR;ax58Gf>&C%lp2(-lsqV;KHT1N{<)H!*s-|0Un$xQyYqF?w!?yAAkj3@4w_<@!+J zRNnI$P9JHaMCBsK)bxc4r))(GKZ?<}Fr4;rC^_jT80d=)^ivGVDxp2{#^t94Kf%K@!{?3Xob^>^XX&sC*qmb^LqyRFBs0-m-yix z^3!9EkHUTjB_~e5Ce-*`Txhv77|z>8Na49>_DrUn*U-(&)ds&46kSO zs~OJq{5iwJjQ$yglU&~J{>bRLp06;R>r;UJ8cJk0-X04X&fDWL4Cne!HsI44&dWs~ zxu-;OdASxaoa=wJ!l}HpO04aC9i!*vTFr2-=ba4a`aH~VuFqc>&h_~R!#kKhwEsnk z%Eje=&TuaGkRgIR!w$6nMTz)uxu-FlmzVaxC^_qK0mFGcMj6iayvcw+#_(Fm(e{5; z;Z!d?p1j5Ac^vq}fRD-p10>?Ze~8gHGW;oq zb3OmUaIVkW4Cnd`9tx&NWKXWoHyF<4&R}>G@@PHJW%y+bzm(xz&&v%s{hvaVNFT2M z%?#)I|BT^W{|6OL_M~^dwVsbLdS0$SGMwxA62rMZ?=zh1bI^ehhUB#8Q4HsD&tW*X z=Xngj4E(hI^u2CM#GmWgVZg6tIM;s-!@2$&7|!`WX2Ac%aLzx$aL(T~OsJn>ACJPR zzUXa3UEaeOJ-6F9hI2h9F`VlYU^v&Op5feoIvLL8u46c_uX`BI>+2zgb3N(*4Ws0& z$M+dti|5)7Bd~u>iR^Pc4vmjgIPu}}_5=g|O~&U0#^*u<{YFNA5~F|0Kz}&)ohgw% z-2NvsoZF$2;SI>6%QchXoKGXec|G-Ql8zJuYs-rr|9 zuXoyKr$lmjc?%8r1cvke?qmagmI1G2IJesZhI6~!z;LeT_YFAx51Evl_4^pZIsd;i zoZJ6Rg;RaeJ4CwPcQSfjFP}4<>v^DhX~C(_XohorPGLCL=WK>^xr-Uj>pjYFUhg+C zoa?#PfZxY(USGdsIM@GmhI9V!8*sP!NrTfqM>3rApTcl%p9+OjeI0{C{NxSi0Hf!2 zJD=fP&oIMzd9P$R*XKJ7=la~PaMGs?hxVTzGkUJi{S4>&Ji&0T558C<64{OG^KXW8 
zeR9-$PoxihEJf>cfWnCn*XJ;XbA84!oa=K6!?`|l7|!*nQ#k2E^`)AR)x_wzJ_{Mn z_333e*XK5dbA5izaIVi|3MYN&ZD(ztrx`uh=dTRs`n=9?uFt0o=lUFa2!tV#{kcBl z7|!KRVmOyu!Ei43e1>zmS1_E*U7~QZ1HJpH?a<5Uxjr{Doa=Kt!?`~9Go0)5EW^1z zFDab#AwMBIQF@)xbA8@tIM*i!|1&uy%Fp#VjNx1#Kf}2`rzxEDps8dp5fe{-3;gY+{kdQ&+QE7a{tV5ZqFAP&h43CIM;Kx0UxP; zI7arooY|pR;l}~v>&&wa^z#k$Q3L(02Ko&K`ppLV*9`O@8t6x;7e1ZkJyzk)@>Uw? zFEG$Y4D{C;=+_zOA286r!05TZeZp{F?}O9}s8rqtR=)=`obwsaaPGGOg_C{eF+S%p zdhXAS4CnqFQ8=|bimN(~Ej7@uXE^u&2N=%%{~3mJJ8U)J?=hU)Vd&vd4vFl>`HWyV zx91p!b3UaE=k^RUoZD?7!@1qAGvL2rIJeIe4CnTFk>R{tZyE5Y`bjO>fyaki74B>= z8w~WnGtj@m=vyH}`@_E&-pue14EU!8JXig|k@V+$4p2DxVG-k>&*)niekj8^pOFT9 ztN}mHfcp)2sR2L3fKNBz)dqaF0l(OQ&o$sp2E5&XFErp+8Soy4b3eI`;k;gMG~lZh z?yUFkGkRX{>ln`Y|I~orYrr2g;Qa=CvjP8u0e{|rzi7b!!EkQRHw^f{7|!kRfdT(i z;Z8g2pWIV>e+6uiF{@HH`jO z4CiwH&G1@Ae>e@Yk#ceH{gKfMC;jViXnp(!{6fa(1V&$HpnrkUmoxfT4D{2F1rd_d zo);PLRs+6(;q*?OF4uz$=W*ae1AgInp^ngp)8A^q+b5*xzsKZW%Wz&_KV|p|M*l3s zV+_Blt^fN5dLKDA64|E-ho(P);g>P|G=_6KoMpg&z;NylpE8`+%UEh8 zNTd(1m$Mnp>91for@x2cmxGrs*P{w2bDYocV{x#m)}Lg zn9zaKr>`lkb>K@CezybPO$s2bci@*R`Uf1i{BD$G^*ivW;`5XP*Y7uPap1Qr`mGLJ z>$%N=pP}ftJMi0-p1U0QXA1wwf&WVJ(Vsk{Te9K^c5(9p=0l&4^=1EzsA6St^ubpJSAs&P5OUdl4sEW*nm@Pq~zqU`+36KacKLGAz~ya zyLdHjFRfa~#&Q~#l+_{ngt&lrYtxqAHKh%P;!=6k5ioVdfefp z*W&^wezSqU9v3+I|A^5qgkIWjUDObfX#ThyhsF<6IPoE$(YPK5IrY)w6er%y_;5R$ z`jIsT`a2nZ8`J0K4Cnmyc!%^^!|3&R$EoKZ6d$q!U$1+O@#pJx?=qZXi?+{HL!6(^ z@Rf|7$4|cg=>-iXr=B{lI`!230?~6l|F8L12rV=99HDU1ldo@2WjMDptszh%{rUR# zDuGJ>T!w3HXC1GcetQt}^Q#$sxdC6OaHpQ%GvL2ve0muF=NLV&$D^2^aD65yoK9yl zKJq)X(hk%|>+;I)&C2olG=^6)K9vk_P&nC>^NASnYYq6V2K>Jn9%XWMyQ9Zd4F4ZS z&-MAO!pZ*JZrc=2r|FCjeW#R?vt0Gc(FyOxq3!l6!+Cs2DBM}^2NMG%Cw`p5>BRZ? 
z89n#s8H}E{$2%DPa`4i6KF;WQyuHIs+Q{-=i?gP`pW$4erw#Z^2K=hQg1xZk3dVmO z!}8a19b`x6a8H{veDnc==5`<{}IDEe~xqdyBU3v;j;dhSV&>lh!R*Lv&ka%leg`^voz z`csJs(iVmj{~Cc>mR^t2`sn$LUXLPNw-L&szp+aA2m(|(fdC{r(V@SWJcZ$;|Jh29 zT85K8`v0$JVmQ&yQuNChu6+rQtr)|J{$fS{XNDh6gevKEWqN#s260}i#sh@Yp}*Jr zV}=v|yA}Tr7*6`@|L@{+h7-MBM=PX&fkeF1;zLpAqLxVAT&oD5my_p4ebo_et}DWK zaqwl(27Fc2Dw^BbX$hc6eljWR$20NGN|lYM{(t{=Y`V6o@9~JQz8N3Vrvi&Q+Q=$o zH-8bnfto5dzIxeodANCzRW!dIA5iPA54T4`uq{3|#HfUEk{_KK`1hEyXtrP9b*kir zfHID6Iepo83R7j4CfJ|c_}XjAV*AFxk@G*~Vtf0lTzc{TQ%ZgvUy5TT>5SCjlT+=g z2I!NHp}LNaI=H%O4PWvBEy+f-8Pq&7AU+sBsm1xFTBVZfkYQ#V2skBIz zuD&y({WhtC_-<*@mwl@jUt(^d4_8`6_}|yVMfgAXCc=*RSgw>GZVb(%|Ar9ZqN5GA z{aVsW3^s;m;Z&|LJ+?Rl_vhnK-9z$h%5SUux(}iJGxi~WgUYY_Zpu$Q2P(f?<=5kB z%D-eE@<+4e@7ss`tFq)LY@qtDQTe(3f4C3%*JY9aqkYJ~K8t)B&kt08Jr3per}6hd z^KVk}d4Ae=8fgA4S@K`G5BXorl0TFszZVz{wfNW^x7IHDgEXvIp7q}XeWA`r;!I^K zSNHlq;z^w{zkU?g7+=Wr#{XAZd10*M3XZb0nWXoTm^7H?9kN7$B&jcpy=a65xmZ&YORw+RCqxpf;er^cQCcj@z zV17nSSaQmz%-Q63|43%)S9wYPp*WoKX@4i1{Prm_pDLfLlcLV_KYDLN=R^=r9kHX>9 z|6zmtT}p73%CApO|GVEHf2)$O`$3XFDvSI*O1`uHr<^7w>iDbWgIpwiruBZhcIFrV zVKZ=Aui{CUU+1U!GF?04$1){f|35>ThqL@wB7Zjf*Zf50Xvg@55|w`}4lQ5D&xZ{1 z+ow}5#1E~gyv>L|zc$G4|Aow8t9&GX91f@a5ZXt!@^7KR5|aM^dE{ND|6PRq*~*`{ zUgqdm8A<-JIGpk)AhVFK==7=lEO|wFcyij`hx|f5C&KkpNXfrpUR($dgQ5H$)}jfle7HoD!<;J*0d{B zekZ+_ujww=`a9%bp!6?Rgrt8F4xL}?zuKVxY9*i6e0b9NHMYT^|J^FTmZRyN`F~;1 ze}&TDRvAfu>e6+7&G&hO{C*|BR0~#TonOOS4Dz=q`L&9WklaL=Ku*w|;=vU>}b|(9m;m~q5-k{`bKi7O~mHZPmpw3iRl=Qhp<(Fvc z?+q${3XxYjpT@6N`5o$8t5kmae-?St`fF^7F24rUd8;b_TE&>kPhFbMugg#RIAs%y z{&VTCWo`Ayb2&~lR(9s6HGXI81Z1ra(O=8DL*KM*si- literal 0 HcmV?d00001 diff --git a/UserTools/template/MyTool.cpp b/UserTools/template/MyTool.cpp index 6f9802b..2b68799 100644 --- a/UserTools/template/MyTool.cpp +++ b/UserTools/template/MyTool.cpp @@ -4,15 +4,16 @@ MyTool::MyTool():Tool(){} bool MyTool::Initialise(std::string configfile, DataModel &data){ - - if(configfile!="") m_variables.Initialise(configfile); + + InitialiseTool(data); + m_configfile = 
configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; + //your code here - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + ExportConfiguration(); return true; } diff --git a/UserTools/template/MyTool.h b/UserTools/template/MyTool.h index 4210f7f..a2638be 100644 --- a/UserTools/template/MyTool.h +++ b/UserTools/template/MyTool.h @@ -5,7 +5,7 @@ #include #include "Tool.h" - +#include "DataModel.h" /** * \class MyTool @@ -14,8 +14,8 @@ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ + class MyTool: public Tool { diff --git a/UserTools/template/MyTool.o b/UserTools/template/MyTool.o new file mode 100644 index 0000000000000000000000000000000000000000..6cb5f68bb0273318df8e09f4185bbf311d2df30e GIT binary patch literal 19720 zcmd5@dvx4Yo&P11(1p8HeDk>_h!O~WItmvLyaoKLWENW-N+HyebD$xCW?qhQ2 zH*c+jI6750A9m(y8g(4Eb*?Ont)Gf|r|R9wnYwP*`;$9;M}zBiKJ9v`BksY@qa-=ddE5~NcY1OM z>h8fc6U1F_&8U)cKr}d%;(CGWZ*03^U(|adPb#gi?b@wwaO4MVo7y%~r}wHmd2kde zUv%$$ekbVAx@z(2nu^$TCfpQU(0Np-jO8wm<@#!3UM3uhc_(5OM>6kNKXo#g-5GG4 znAacfjZVH241W5b&^weWf>XOtY72gA-p=-LQ_MR~ng)0JhVba>163i%oo)|z#d4pl zalNW=ESH42?cr88*Rk0>*dBIiFgtMoROnxO7@Dcn^==6FD$VCA&Gl0Adn&c&iPKDgX{sM^bGL_U-07ZhYs~wdFhjZ%aBrLUgxhjBQ!S8=>~op17~ocSx}zbM z>kqp^vtfxc;>*xV*~*BMh3@a4>wdBRQq^;M-RaxIA$NKOyDtoxnh8#QkG>GLTz*AZ z8OVAl*765k^^VD&-1)>#P>T@b;J4k_c2iq?UvOc&0E;v;$%srkTdF|xM;KCiC zmH3OWmR_RA@=u|0EhbaUVJaS z7Z@G6BwC&eWvKb4)X;Prn%3vWQF%F-eV?dYjmne3?Bk$|=C;?zyjZyY=qB_w zmO}|pYjnD;E1LUgo!fFSbGnLI*%U%k6XETW4Kl>stCv2VdL}a*^?o(`cQNm`8VOFd zK&_kmB-e^AH+Q2DE7uXA41G!rX3qtgWYSSM^-M5(CYilM`SV2tVV=gkpJ~zKe;Lhv z3f8WLwKo}Jfx^(W!qAW)#@K15%z07oKk~oFfNmA>WHgQcItnwRFf&?YCQ!%|W@fZJ zHBr!G$UBt3&yZJWqCBjTQLg3D+zl9Ncuw}54UmbILG=A6U{{9DIWBt;Xw>`n{H4fU z!zoja=r7z`IlZPHuS&MbSW#mr5W=ta~;B1O#l_;V#7A* z3Q?X}B>Jic6B9Obe$4yz>|=BLiP1LxoSVC?#+}?(&n>W)MyjI<=7Bi8i0P-!M^W#U zjp}$L?2S%m!#hY&FeHj)L0G)G#A55b%q%Rjm{D{t=Hu*R`n;s=Ma%ea7}hlmYgnv9 
z{dz10+H$M|=upe6Y8Dih39$7V+dQhR?i=ssnoBMSvp?jnFU0(#uzB{|Cr?&XHCFdJ z%oo5-$lYC))%Tv^e7~~#vu8M;tE&E1(0QopCly}{I?tX~{n7$w&pWGMS>U`nzxvSy z&X3+z{rCdsziNr^4zhfIkofVSlyzwY`E+IV{u*azp!%CN&MyPQj`OqX>W6Balhu1G z9;A{hkSFMV#kN2fUP#f>d zghs}PhC(B$OlVtTXt1BeNXCA0=ko(%V}MYsi_;H>yToEC4srK~p91JkyEx1JLB%v@ zYCTW4Pr6w2N5ug$8-9S?;#B*?3KucMI?u5K-1u?s%!YRZaSvw0_aSErE{TP^WLV@b zC7|R-UM5uoY+~9PJT~Lz)^kO{9=EYRxF8#DroE6zLnAX=QECFh7H(*6uI;Gn?UI&z z_f&})G#<0I9f&5fvTRud9=@lTTvBK*vz3JKoPwZ-NXu*nn{Hu zdiGtxB2UP0mFg`|4%1?!VyY=O&>EYbXo_JfZj<|<;KTuTj%#Csn;Q@5`gb|3G4Ev@ z3vp#qf;YM#L4>JIOYc$M{#eV2%zFikI<9Lmv-hfVwTc;Ouy^>y;HP>JlFB^vUqs|n z6~QHdl{V$p=?B9zu!Vhp1UVCfe=l;^dr;(X-VAYZdRklsf}9+m^*m(d(Un-sOz_i3 zP;hg*!Zq+(7IDzKft22(%D#KTGvqLAKQIqAsOFYq`ol zG42S7c!Pz##d5eZc8Q4y8um~1Q3O2$9_<4%{B`7usT0F~0gfXySxk;yRkF|*_vH8Jm~x=rIayA!^Qd3%H} z<+b)944u3RU4RmAIMABvx^PmsK2{R|@Hm zhIgX7(aDpzuAh`8T-Ohw1nE^_NS{I3g;^L>2B4ek3F8?3GPE?Kf`gsvV(FkhD#xsF zk>GJ=bj8sxdK0$Uu53b9u^Ma?NA+}VZdHU5*Ng*Xyn+=0E&5k zjCsEi-H)Mbbfk-!N9z1fg)ZSBasPW+dI_9Qk&L%ZEmsbpxO2)2H6uf)1PQ70iy=&| zTW;6WQqWZvga{;?u+m8-Aex=DN4yGMBJK-JkWLyk`V<2Vln7q()3eOOE~-OKzR z|AdBb$$ss`iAt$avSzxfeRiTE?t2ZtQMIwzW8k<5(o0cwbx-8NXYS@tM54nkqYPfXi632Ou zGQ8pw8=Q)Xx{CAW&#T#uQL6)fzG^#7&dF{2mfqMhB%|O~}8$ba{aICJa0qb%+w<}o>F^|^P?W%06t-mYK zQ5%}9s@!#YZGD@dzUrNTWzlgOK>BbAU;7;2?%Mixr9|6b@rkaO=-s?8kn=p}Y!@nkjQL>5$ICBX~%7S6~;GZKn&*1RkdFJ5>AY`>C_`f$G37 zt4}ZIskPKenLnXGr1elJbPAcBWqj|__gB@-2ZL}|<#l5GSS^>h4OclY6wN_R7jRD3#iBgm%rC0z zO2B!SE*9khhhHd)6T4Dr;37U1NkBw%F+FPm@nxeNu4hGYTR@C>F}4TQ%EBy?F`+2} z&e^(Hln2BqzX%eQfcUyn4nMC1d;|#fG6tDAhB^fbGgl*+=WtM^fs64(b0hEy!_V}i zP66kAx>%G4oKR6+R|3u=T`bB2&ILtvT?rIspGIUJaQMZjGzG-$FNgC>QfUe}V(Bkw z2sn);*oQ>NOZ~iB;G7A?sZ-#k;~P`>ykc_aBMQfoP?R<)T+BbC<$8sy=~rm!6}b2* zV7d@+F44uJJb)1{mjZ~0aw&kBQ7#4KDpArDaKwqf46b*M;--M(_l|52INTXZQ^4_i z&2dub(iz5jqBUhL_~(Jwm5O%uTJUcJUufG;xQvsuafPYeBVf%6NhOj-^< zh0%U(6ZapcS`5je%DH&f9=RB}rj?ZPx0Ce7ywLWXX`%17;9G!~v*+W$7aQvm_cW#l zEOHK8@c9U-a`rE@;5S+DtObA4g8!QZKMNDCoIU*({2mK_$b$dQf>&Uwl+(M(f`8tE 
zf7ycn)`C}If|S$S4xHn@LyW3CU$FQE3;hcgd>y8AIr(>6@S_%-OF%g}^~4cd{`7te z{ZinIoKS&Wuhi=WzNmm}T>}<5cMJLp3iPOo^m&1Y3%K4P{?;PrXTU=RYNHUp?&|JL zb9}>=cqW}pMp3ye9`Bo&h(sbw&6mT`zV1vU(l?MugD^3e8H@IHbSLoB(b1h*KAIRz zN7r?)h~ppXk*0WeawyrCiHv2U-OYrS4<*OOqM|t=ppIs|ZBC{WW66##a4gd%#79#7 zNkP##IF?Q{HaRd+U3b!Ec>$?l9@ zv$`!7kB^OSiAUq|rq$RtIts1bnOJi?KA0MWPNy-p<}&`Q5A@5DxF*-#rAB{ zSJL>#ky{5x`r{jtW8=d~qsxl2tf7&Xj)~D!ISVNGh(vYj7fZ` zNG66~Co+X&5L_z?T%g}FzO@`n2Q9@^7)*`DL7o&2fn$5jm^vl(EmAuKzmDEFAX?6V z6BDIOUm|g3{K}zJUt(x(NKDh``0%O6;sZ6I;R!fxWjvme9*z%=j3v{Vves|l+?5?v z)KLU$6Q=yic-Ok_W$_^?t+2#ESYJ9hoE*t;+Rwq9_QLUU9Ny|Gge7NvJe?dJO7tZ= zhKIY>^$klw$|pMdjS*H;#~+U}te15sGpDqipx>v~gBE^$LqKCq*f2co3@3;CMz^23 zsKnUV;LRf)8`gC%r$O*_OFW(#NT+U%52bF#N55n`ol3(`J{4~%#I;w|-2eYvi)mKb z1!r03DUQqrH8f?VMw+g|YS)!WBhpW4+-dJRW2JhNyXR$nso~K?8k?XT35#)WbB~i_ zTv*$dQ=DFojTs?uIi~Ucu~d8@G15PTB^(EWL>z%RflUufVz+-_)LTk4MeR-4p_;_e zBC%y~TO`5>K7=5RXSRO0z($VpBGQMpvof%K) zH>CXgQ6}DO_y>G=?Jm-l1d(uDcKAVD;~yo6MEc9|X#DF0kx1W#N8@+nKE*`52am=d zFz{st{tk7PB>hGM=br#F5jW|>3TMW*B3k|ugMO2Nw;8x8r^|wGHgNhx%bzjus}1~+ zfnQ_bUAXTs(f(`kXgPfbZrZcMz)ksIG;mY?tbv>KHB^Yi%%uN-ft&QMft&OoqFIn(2jQME#neT1N^=qjGUuVHTX2EZ_;8_d)ISc+B3;vV^f7*f{ zv*5MHx?%b!Y{4(F;Hxe877M=Jg5PPu@3Y_vF4?~>l*3yR|N1=~-GNsog>`^yAd`Q- z^a`{TAF$wA3;rbwZnOU)3%&T`G_?8c=bp>tw||R-OYo0baGU*Kw9tRmf}d`|_4-OX z`NdbS!;38R5ewdG!Sy=rm(M+y$?qTAIO;D=Jm<9H(RN;jG86GOJR0{1B2oToJR09k z5Q+3_@M!#Af=I;M@o4;A#yZwv;H?&1ug`uvKdb2d_&!6FnoQwtkv<1J{f`7@tqlVsZD4f{^2L3&R z-t5DO_Y4vu`SFVt?w1oW=-a@j z$E(wzUuWQ7G3eV3{09bo%)q~b_ufowUXMr1|F(gf{d&@ZAF|+|;v$Yj`Q|xxkAa)# z*dH6XNzeD^Oq65NUo=m^C3wGqoA;ev3g%;O2UJLgD_n<@aFCV z9LaCbd<#xGCi{_Bx0NKX#Cv-k!UB+>>Af!*eT~a1LvB+L^;>v(e%WaD)8|C zX8IHHoAA`B2Gw0be}kVGbqv0N$b^q(fd2`(*=e6(SI&ZT!pr1Xfx!HUhC$ciZYSD zR+QvV!oW$d^*(6e%w>EuwN5iig1!Kfc!nw-WhV`C0Jv?#Pues|E{zy2&g!4+GpUdxI1=jB`Y zZOLdE9~m0FB{{U+X}q})FPyK{i*bra0j9T4DgEejjAb6zb7p( z;B{MV-wn%(DtzPfwTjP9jj#9h5MNe@`OU{^#Q!KtHsbF?FNLT2Zc%8bf z)45H*c8#f@`<~|0xsCsJ&@!3&xzB1oo!j`cg4-$l#U$2|L@E2b3fC3Iu8NkmZ6TzD9JRV@}Fu#Rj*UI&iI#nBx%)v%I~{a7JAja 
zhW_LJqQ^s@A1(w&{fZ1FnWoK6T!YBf_EtbHL5VVL{C;|Woo5kZr>XrBMUi$Z{pR1d zvccbfo^eF~ef%db6=EE-|J>L8{eKQvIsL8b(4ha0O-d`(e;5tr^lw&&uNlQi|I>EA z{?~w&(|`O;>gRjxa{B9+q5?~45T`Mz7|S9Ci28wLx~h|CJVJ~un>2D8v# zppb>*O8<<)*?+zR^6S4y>G#W>&p!x9`YV|gerc4#NDgFNb zSDi_2X`3v`U)su~^8g&rvVZ-lQ~VbR>bW1k{jY(!-1rZu!~29{rTypQ@!NkE^mAVN z*NDTqUEcQ33 z7wfJfr2Sz$e*3>_(Z5saZ`O)buK6{*$D)5!>F-s9)X%+6^Xq>7t406Jo7Devi~gsS z{@sTDhBEqJvgkkYCdZH8p~}TiFApn7x}WSn*D`Vo>gB5-cqAP>_L z#V>1fzaP{W+&M^;G@ph)toYH5qSU1L`CsYHr0v&O3;4^;{|VLq9ZDhl*N8{+YrnAB wr$UGxPz0{phq;|ohM%9!Frg&vT_mArHG`t?_n&u%a{MQ*kPO%O8vXqL2ZQcGT>t<8 literal 0 HcmV?d00001 diff --git a/UserTools/template/MyToolDynamicMultiThread.cpp b/UserTools/template/MyToolDynamicMultiThread.cpp index 51f11f7..4c56d0b 100644 --- a/UserTools/template/MyToolDynamicMultiThread.cpp +++ b/UserTools/template/MyToolDynamicMultiThread.cpp @@ -10,22 +10,21 @@ MyToolDynamicMultiThread::MyToolDynamicMultiThread():Tool(){} bool MyToolDynamicMultiThread::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - - m_data= &data; - m_log= m_data->Log; - + if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; - m_util=new Utilities(m_data->context); + m_util=new Utilities(); m_threadnum=0; CreateThread(); m_freethreads=1; - + ExportConfiguration(); return true; } @@ -33,9 +32,9 @@ bool MyToolDynamicMultiThread::Initialise(std::string configfile, DataModel &dat bool MyToolDynamicMultiThread::Execute(){ - for(int i=0; ibusy==0){ - std::cout<<"reply="<message<busy=1; break; @@ -44,8 +43,8 @@ bool MyToolDynamicMultiThread::Execute(){ } m_freethreads=0; - int lastfree=0; - for(int i=0; ibusy==0){ m_freethreads++; lastfree=i; @@ -55,9 +54,10 @@ bool MyToolDynamicMultiThread::Execute(){ if(m_freethreads<1) CreateThread(); if(m_freethreads>1) DeleteThread(lastfree); - std::cout<<"free threads="<KillThread(args.at(i)); 
+ for(unsigned int i=0;iKillThread(args.at(pos)); delete args.at(pos); args.at(pos)=0; - args.erase(args.begin()+(pos-1)); + args.erase(args.begin()+(pos)); } diff --git a/UserTools/template/MyToolDynamicMultiThread.h b/UserTools/template/MyToolDynamicMultiThread.h index 6e5d480..a0606ad 100644 --- a/UserTools/template/MyToolDynamicMultiThread.h +++ b/UserTools/template/MyToolDynamicMultiThread.h @@ -5,6 +5,7 @@ #include #include "Tool.h" +#include "DataModel.h" /** * \struct MyToolDynamicMultiThread_args @@ -14,7 +15,6 @@ d and so will be thread safe * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ struct MyToolDynamicMultiThread_args:Thread_args{ @@ -33,8 +33,8 @@ struct MyToolDynamicMultiThread_args:Thread_args{ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ - * Contact: b.richards@qmul.ac.uk */ + class MyToolDynamicMultiThread: public Tool { @@ -49,13 +49,13 @@ class MyToolDynamicMultiThread: public Tool { private: void CreateThread(); ///< Function to Create Thread - void DeleteThread(int pos); ///< Function to delete thread @param pos is the position in the args vector below + void DeleteThread(unsigned int pos); ///< Function to delete thread @param pos is the position in the args vector below static void Thread(Thread_args* arg); ///< Function to be run by the thread in a loop. 
Make sure not to block in it Utilities* m_util; ///< Pointer to utilities class to help with threading std::vector args; ///< Vector of thread args (also holds pointers to the threads) - int m_freethreads; ///< Keeps track of free threads + unsigned int m_freethreads; ///< Keeps track of free threads unsigned long m_threadnum; ///< Counter for unique naming of threads }; diff --git a/UserTools/template/MyToolDynamicMultiThread.o b/UserTools/template/MyToolDynamicMultiThread.o new file mode 100644 index 0000000000000000000000000000000000000000..c20a8812782bc756045567f0e6a8a42ea2572177 GIT binary patch literal 46912 zcmdUY4}4VBmH$g7fdHC`|D&RgIBKGhW{`j3k4+#$-pB;A1S?&oLkJTR&A%oS{wSDW z5@b4zQnkCb;ubC4+Sc8&)hbogD1U;rRdlOGt%_a$Fd~Xt5&uy3d+wi^GjHaR;M(2a z?|yiB?|t97=bU@)x%ZxX-+e=;*FQ5eBg4=s!?@H)+-Vf&yA#)?;B%hlP7^GtPJfwQTueK=pHu4m!wSJ#y| z&sNt}IR66#58W8fbBrs@@L|a${K0%dlKi}@bF$Gf{P9Jg z7Ei0q$dx9PDcekXnNiZ^c;e+bDA+tc9M|G%$BfCwRa?zyMZVczkw+wExFW~XR~N3B zq%_`9k#7iX`zuUIFckDvpex-`QDtmhSGP`4md(AwjJAlP2~$Kn3qZBo47cn7cPX#p zfD~*-ySu=x&iO%~**{~MWIUr)%M=fVSmz1dci^cRc9{L$_Yxh_RrAZP-s%f)leSn_ zb_GO-Kauu-FW-#FERowbfYyvWzK%|XPnu7B?l4PUaCYw$aMoTk8gl3Oqwf8 zj!+W$HwcPHq~s^f!q4M**W!skGWvtc-uJQZYnUZFLMNJq2h8x3D(n3l2=0DA#|*2>ZTVFAviV!#7e{vO zyW0%EZ$_yR7@j_n3H2fDp{$W(Ms5=m|5kk$#ZA268$S}j7 znBkpf;V$!uLon8kw*NFso(l~%!@`#S$S=v3RQ7qkNK>APiwZN+k)J5+N|zb+7W*T? 
zJo5dsq!eX}%HB6aAbo4!@9UiBpdp$Gykl>iMSnNRpk#J$%h^mFM7+qdNt9h#_N{ms zA74O}tKKWfq;iOc2zM>5Go8(S>b}m|vJ=NHa%13=NA1i|Pn?B*cX1rvmEkNr*{xLW zH^bY-z?(HN$h=dH#$eLIHmSwHEOf_@fWF zH-N%(&==_hk1gVk8b-c9<5P5NN?k9UsWIBX1CBgAwP=J&M8`ROKO3MA4*{wfvztmZF z5>rH9fcr_yP2R zqLP3K1eEkSZyF<5uXXRl6VLP1ct7wJ?o_(@!8dXWo`5aPpjBQ0C8kVuCl5ZTrA6fK z-Bcblfi2PqqDD5lHwYd{H9|F^J}nU7#aFvPoq!PL#PSA z7S*l#o7i^o5EVVwU2jH3H!oPl=6-qPi065;`2_ruEUW3xJ8y(_>)fwGzSB1>}2-h=4HBSTC_>t284 zd`LBX(G_aIdw2+Gp}qbc!6}KYWkl6O>ATL=XF}NQ$kc))a?;{p)&N>0#QPAf_##V7 z{oypCQyAYhc*YMyxzuu@@u82J9oga{pQ!9T;_Rsc&$6!0N$9)t=+gPv(ivJCGu+P& zqD^`O!nl5EbbIR@#z1fxtfNMZvd!>Q^sXX2>+UZVe9cHi`hpl?&BEV_P$T>&vjpbd zA{b;9?kJs{(cirZxhZ)Ll8bnrkNbDzaGqUe$>(i*dJj5# z&I70Jzhx{Lj(8zH_+PuAJW^Yau0Pp>CWBUix}#m0(_fJTA4K2UZyQq@^*6Vg2+{=?)EfQx~z^wu5jQua<%@7K0H+&eS|Yhjtxb> zuiJwTB`G|AjOIFM@Wtj7>J6aztRo^?KeEgD*n!xkxaq$s-mv!TwjrG*_B)IRb?f|L z87KK7LsQxP@e5*iM6&D0jp{FC14)U_o#&JqWmi&k`lK&>1VwC3#IheFjHNC@#HiDA zGMpz=>{adTVt=F-jZ}p#h$ixB%-us}3-9%3>7K9%B@jcykz%Gdp+(|MCC zPh4f7M}{Sw4|_3vU^AoIo4um zA^j!^YU&4`&Lt(c$UqhGR$e^b+9LA-%U2j`XA;?N7uhvhQT*GMc4Q8(7luX!5xykY zwy-X|*8N9ZqlR9@DM*#K6Bjh&1okJKkS)tCDr?0sDtsei%JBBe@O%55B7UiiEOO`5 zpncxdDFu=$lTxe8CntL@BIk(y?x7`!6c6|Z%5Ne5p9Uf&hX1n zNlq2%kxAl4qTu{pn(8 zApCv}a-%(#Qn-wGKIER~4+jgByL*Aar@*QR*)s%e z&61Pd)D^&nr{VT-YLaH;IJZCi7fcWKQdVDg1tJ3o%lAi`Ox=sXGNu0T9)HGOYPN4W zd)`2HfA~JtlN?4*QU~Wmt)nLyjVq{~d!i3Si0o+9Iw>yK{$_8rt(j7%hDRvfK?JgC?P(iGi56MXa%}49P zx4XO0gOZ=-(TsQvZhR3xb(iJtd1CHB-b;_}z#}R?3BgFQmm0=0V3c&pX%g!qCGd1=-S~IrEnO*36@zv z8PJ+N184H=nA{q? 
zTe%gL3|-BnZba3ySTa$Lh82J=DleX`p)+JsohrY`qHE!-aGzx9C3K4Od2=4^pbul+9~-oKi99%%k}F%d*(l~QhMnNLy6yceLb`c7EguNt8kz_trM zzVNTee-R1MS{DiQ_1{GPKuO6Ky~GD)GTs7(XjbAGU8AIXaK@NKeF``=s&P~-G*MMi zGs_mwuBySNR~x~mC`i%HF$1m@??>CmPC`+{thpLC8^*PP3xw=fv$DG6P>}D{*1WWQIl*% z*;E99roVRZOaIbA(?@ELUA2hGl*KF!jQi=%931ND&641z3#SrSZ9sUR@7wH%qb*Sx{j_zWDS} zYO?Rvx^*<5^}0upiQ$5=p`XywPt1zE9(&@bA zLK3vX6LHdKp(hRMItYEf!tn23azz^iD`CnL%zfmVocg(zV;u4E)E^<z`0XG$8gtG{RH_tkVQ{3RoSIdlm((qUy;1h&V@>WUiiE z7QkL1HlvholROWj8e=m$)axE9ESs5`tG3ZBelrO9s&wL&F0D^{(Xw>^A^NBD$i>uL z7^N38;n)`kX#IhDd+8_-h~iObBtkxy>>G8}0YtU;kvgin(bCb63M#4=@dzZuEcRpK zE&9O(e_0sfza!3ikKiX*3njps252^=AlBj$SQB;Zryq!Zgn~@E*B+rgBx$~V810d~ zq7Q`9EB?))y@LGXE6ul^J-(c=~|$ZL{~O(DCOoT{-lnHbN+f>SA`{w587@_jN5qBCG`jCHZ|@ksI$c5fCM(fIrP zhorW)yIzM$~o34auR`WCSS|I*mAqLsmpYP_at`=Uc?cu&+v5y&u> z$9#B{s9zTur4PcGF@*7w{km9akz@?YMEwj=$G&_I@rZEcuZQ1Aif`ieUlmu*>q@G7 z>@Y~~*@Ws5uPgTO`xHgteRc_ZT}f}-_e)34gf;N?y1*Z~rodkkqtMMc^EoV_gwCdH zKH++p-B+oGgR9=YaDT@rY>V(5@fYqjr^%4$rzB_^Y=MaAc3g-1pZMUM*e$qnJ}4`E z4PLmZhN;?JtQ*Bcj1>D!>=^kpb~n!IO{0`2w{j@iC_(IG3OhLpZ=%A23q^*m*y(7Q zNd#y&rLigS8mgB3*eej3^gM|f6+kv|QtK9U0W#Ow#1;ATViyo*lZU*S7$jbgCcg&# zsNdRd|NW>x6DSrsl{$)V5)zbY%@ z%&%yReUL)MTzOd!O9>UV({?B-n|AD79B~rPRwrN-=H3i%trk zPZy|o>nT>j@Cpv-CJe3Ajo{VPGAybhw+RIEwtUgAe@HGC3DT;sv*%U_rxi6?e0BC* zk4Kf*^HWK$70(tovvKp3v*!uW`6A2lRsjocd%p$G_0!TDpi*D7tjZVpw%8OjT)4M> zgaw$gVlvNinT-l6d{8!m-d93RzVJ)?eionk^}gclErD1x*HV}k^Q8HLtz6F#Z67<3 zz|q-r2FN5EJzYfRo>M4u_|@1*C@A`2fB0D~tm8Ic}f6X9`7S)P?q< zGlkCdgc|4G1|2?t`1-apR`hohwe~yj8S#Q5G$)fw{0d_6oRRS0XlK6I_=oO;q zXDE)6Gctxw@r7THZAWjTUy=dr*e5&-YoeuQWL_THL{&mfKlH~^NP_3c6rQJ1@S78x zZ{Zpyd4%m-)Tv@~>3+ zz44w&Zs~;&>Ds>iG*dW)Wq>4>Dx!}4K(o!g^%r9fi!7rZi{IMD)CZ zM{Ep*H>v=-$m^17SKc6i>Y{EN)9g}b?>j=XL9DWW-3#n|Z5Z~OM`bG3me~-ujG9uB zw&`EtIz_4Wcs6*g$eBw5_171FgtJttO=*1EeWCme%x)2}JSb$VlkUrn)B?0Qu^ zLsc<1nkurhhvEsLOU%!lhGrt7AB83xNE6G;lr;idn`RMgEnfs1Trpzfq-1oUQj=R% zm@VxX8!rY?yh=>_c81v__82l6M~-A<6=g3G=M&KR<9^0>aBj%HXP6Pmz}3^4+20#x 
ze3qGg<1pietnANoj9Avmc*)*7B>P9hjP*mZZyjbll0(;j%mMlSoNPP-_6F3SE~@L;T&)KIYQ)hUn<}K!hl2(^b0Bw&zC_vB61O*ck1|D!wB&awihZm zFCMn>%2%iXZg;F^V6aj>l3mes}gvMh2N(54^-?yyzKTjjaV6u66Ha#9xKGI?!kiR^K>gM#2m zEC}X}Fj#}BZ*8EVxjonx!ur*fKJ0gW7>1(cjBkkReh+(87Gs7w(T}Yu_%23#ld=n2 zUEs@T4(dJPVIMwPae8-QBSFS4GyIYB)+hXhADNhu_WlSiL+z%Q-spK=@t`J#Ru;bJ zkFGc^hhA-C6+P>;S<$Qu#T%+VyxOKo^Yfi!5GQwuj5}U!v{)x4t6XeW>W;S z*U%mK!&zc68eLVE8w#B3dA_&L?=(O1JAF@*!SOZ(0sE8UvnOHp%EAwPt4K&^=OSD{ z@varM<>5p7wyXI!eNu(+B*$OUnjiA$8ljC)PyszBBIJ4(%IS|ZfbeuGBT9;5#%jQI z9f-W1gZsL5{o$LRSUVm7j9rU!*_CFrA_qRJJQeFc)!5dL3C0d*&*db$(k#VwCp~*=8D)}Q6 z3GbCfrQy~0Ug#2j4R_e4(R~9xHFSEn`kdu(jF)2zVQcYzqr0=faQ4Vm_<##bx2UzT z*b$2_OfZALo~#SOv<)saOmxjsXySeQN+DFfZy#Dt@7tF*QMn<-)5kg~=|3o^E3_x2 zoUJHlE6TaOvgB=udr_5hYP_7Ms%~l;^zQ6DPF2;psH(H1AO8*Ro{**{*rh1C*?oP! z;d6Rl#j%}=z8FIRbe0L9RmwdSdccWHqX*d0Hy>Xg(xqTU7b4B}Tr#(%rEz9kO;d1b zOWU;`&s?#l7^rDm-0sy>iL;e{9OX_!r*n)gsa#GZ<;$CEni^^=I~qd`OoAq}J&zq{ zm)|za|CbNWyy}vYCa!tKj2V+%1r@WeD0F!|MdOQ#U1N*Kjw_z%DRC9d2`+M(H6it2 z%-BN1z)ob5GOn9rWGu_eIPpY{F2a$pgB61`<@Jla^lUw0NTu}WA1H6L zcxK|38Y-ntq}=q}yjwD-=jPw+n4as3W=+p6SespzTfBNmS?;9nq2;*?9RKXdodl#T zw*VBNM2_i#*EnR)LU}2P@y?fGmZH-ELmo9pV%%G@|SHy?U|`ZDOOF6Fo7Q&qk{ zP-c;@JdtlxZhpBELS+$KYjB=Qw89Q8@pASkKB74Y`z;gC$le>&GtnAxuj?e^x+=e@ zv7vVo$1`=kQT@0HLAn7)7F{IDi)4?2JRDqYSuV28p z!oJYsDE@bO?w%~imyWa?h5cRNNmDFJAL9@_k*Ry=>LF{hqggjQZpoxF+@~7i2yEjR zz~@mLj*4uEoSEYEz7fRhkm9uHQoRW1}ZjKySQVGCax z+m_3=Se5y3T1%9V$b2`gz7-=fufzk?tEKh&$akqePF7`Eej2Zj*=ckT{lR6q2eKVI zoN0y4BAzZB^@`^^D3h#@1KGSjs2xtt#vh~$>5u5U{!z4By0uI68CI6@VPfEAgbvaA z7@#g4Vhd$D{Pa{1h{HI;20zmVKPv?%x5$J(3)B*yjZB7% z{#ubZ#IJWG)6<e>l#R$e(p`7!&nn{7m!ROoj{J#?!ZjJ_&zQ>_k3?Q4-J3FC4}cejC4b$WbtU zuP+?$vIQkvR8AT^Rj%ZdDp%rma@9a)k>E)221(#Z@dl0O zSU70h>LnVd4`)-8!=O<(H8~7?=aVKmjN{XwWH)?7mL@q2XBw2~tzIk190opAOOqUO z){^qXVOYIfk~reCt>i2YLrk!fz~7;2N3n5S*XUaUPmJSwL>UHxblRT-{9M+Lxs;UY zHuOH=qr@+6NkYxPN#Kb%VS?iGec&g?M=v$c!?Vr6&ovVLp-a*K*@oT$*Caj*6rGMY zE>d`x!b{b;1$eshF0sK|ZSco|WABsVs^);Wdjt5nMxx)QIgm(bD(4!B{$JztBNgfR 
zOcS_U=QjSJt^xwTAdZW9A@0@#PgkzpHh4BV>~!>}0w2ZVCC$HD;E8z2t29^#ob2DH z?)7~0AshTPh3l?~=9(f6g?%!s^qrx5gpt70={eK}FR{T*;KJWjm#F1lqwp?;i+L)v zxWk6e3pV(xHaHbz6pQ!7To(K{11EjfsjgVeOM$;3=ymHc3^DHnz7SnuIy*Gj;4LtAI0V!VhsTHJZM9I0ft_(e=(j@qB%RIb->ffecJ{v1#vq1ZX29F z#7{@>!qA=$ud~6wYlGivgTG^gUyK1Ro!nj<{2m+p0UP{D8~k}2{9PM76QN9=>f2;Z z$-hp*IiRjb0za3{`}9h|HwCUop+w$=Hhk6y`ow%txry-;fhXpJT8GC3UKZELrLO)W z@ab_pUtJBs!ne*Fe~62306#arqAh=Wl%CHPxE?L#N3Dc60e8`bH3j~0_5klN%8R{A zitvuQrV;x%YT6Cts18jE%vl%+VUM;CHxmPa+GWc;9?w|59mH2#9rAc;>ucITSkn+{ z_tko=DX$6DRHCGfO8E-^<254av`DYZVRibo9wSY%j;HWfc zXu?WIf2UY3)P-ULwA;b3Qd$kbiFyFR&Gu#G1l3Ba1~67fdt)$2{`eJ|0J;clTEhn- zHMK!+Q&ZKf+NPr`l`xd8-e8bg!RnpwQ9F@xZ8=-k+GtY4v-!oBkHGFHH@3J|w#53=b*(Gq7<_;0_AoOW&X=|!!3^argn!r7L zRUoUr1YF8nFP|Hr%BXE!4uzYSQh$XSDb_X87;Ih~sz*rE*3yRBIC`P1@d&8Vb+MK< ziI11qgNqh#KV+&#KdGUmJ%D^c)vD&s{U33pKBq*g0mkX1E4c#@jiWF{1bJyXFjNp( zD&AwM)`i|~LLks03$FH4rqy5$#g0-*MeW0QUkp#25U84knrjs7TT0*qa$Baa$?Kg{ z-Ga(IYO_oZ7`Y(_8y8p2T$0+Hy$FY8f#bEMMz*gYf(ljB-rlgd*$aOjrx?cr_+#=R zJyh6-+7rFYg0&qXYKN`pgzznApb59j0vKzqwLqb^I0ss+u-zKL1MGi8Go@ zy#E+vM>nyyrKz>14fA+0>Y>g`v@dB#twC(n7F>u$hvwSgLK{D)3`i%mE!mpCYZXkXr3>#eG764R9GhBh=g>LRQaoCxf6oS5DNs=uNK zCT0go1i%&sFK}gW6NTEQj`da5NFi{76unLG@~~S z)z%xd+EfGQ5ajAE2zOCWMMSIPnsK!3qV0-EHwl*mSYT0mOQ0V9))-XGP~v4+CP9Ez zZLKUzqzw|^z$92gK+K0FvYLerOFRfJ1GSA9gafox0e`P+p@1e;R(u|sDkJfzdVr9Y zIQs@cldL1fsO=5u)wJ&kr7YVSv9fM!M_Vwkq@gX;QPYTb*)m;((*hj$d|V_Kt|`&$ zqN{Oe{6Yeei2f=Z8V?ePMD#T{H2zZpk!WeS7>CC1BM^z`DX+#KBoK-4Ivg5*i9jT) zo(}asg75|$n*JLKCq7FUUdia|8U8e*znM{hyq)16DV)qPmf>GAdhS0{NO2@Ot;V7CoUU-v|0ag}7`}$#TNs}(!{4>R zUD+T)vf5{c4IWZBvHKnlt>^WOp4)Q=qv!fOuW+)@C5+F@j6TBf7{k{xdx$_v#{fyyU zZuW5~Bof(yc2{dV9ItS)bA;h%Fr4dC!f>w7Y=(1v>KV@UX=nIaCbx^>TyB)%T<$## z=W@3&oXg#*aBDl=#pt;mGO!Y{a3A*NP$GTq!lCuwW`qA1!*68tOHafN67l&l4$bH14F4&^N1cQlB%=RU9GbpR z;p8Vgjw)t&Gox>1d{#4jDWkuM;WsjT4Z{yHK4FGmjD0PXNKcO6%kb~vnb!YT3Mc)W z7~XjbZjgx2&v0n^wG8L{H!z&n;{!JMlMKI?@gH$2ZjeYW=W_wWTkuTl^REoQnc+y3&&-n~F9gLBvole4`?U}1^lFR$~(;0mUqjxcUGQ+3Z@bTN=0UNx<24BkX 
z4NUGjg;TkvF#HZi&&%}$!+C!)^bBN1B0F%rQsHE0-rm2>aNgdR+wghF20#5wk}B(E zD$crI#wgrcFSjy0$mmZQNnBFfQMC=;%y8bm4lXpS=v{{BvE%j6{4m|7k8l zCC=$?&_zsm-Hjp4T-tG4rya}#{Hf6iw(m)p&7uIK#>=kzZzoYVi)sPuCC7|!{; z&2Y{q@4N&bPCtR+oPIvTIsNqv=k)6t&g=1KHu#GS=k_dfOFc7jnubH$f2P8z-v5o^ zl{WNqZ0Kuk=<99h@w?$7S@j9q&~LP%|G5qQCL8+S*wEt#;?wH$gbn?hHuNzY`U5uf zpV-jPlIn=|#p|8=WJ*~$IKEb(5?_RCUBB05$OmL+3Pm;kwpvHh_$Wzcyl=tBDg2-X ze?Z}Xv*6!X_z<=Jqxt_?;rau0jjJ%#IMqV`iK0Ku>_+<2&tg%^K3|?SpL~TMZ^1_r z0n#ZJyjP_{EC8(t;N$`YSB>oeH0C z!FMY>Xu-cwc#{SHrNTQb_(cwBx0M$BWu<4Y1$Qd?s0H7s@E=(42NZsn1^=bu|5FRT zRnert|{uO}p&g1>5mzhi@cW`k@0waP70_ORmgKc`c&;&W~A z78`t(4IZ|^H`?IhzhO+H&vQ2PFWKOKwZTtRerheR{O&r{4r6WTXW8J_*x)N{@E#ld zhc@_cZSY+-xNgVR@_t}LpQ{|*N^kd`ZnOggDY6H>w3u7 z@qfzf+y=&w{bnQsK6r?mn~*79~rI0e7S248Q3KV*Y{UF9`x_+O=P zYk9wGL*H$KM{MwoHu$49_)|7Gw*z0daoNycsBo%xK0l?oKP9S1KJWW`^Dh%pQ~hVS z4X)!I(sLP(f&6?l$uFyXDXv_W%PF_ob0xS;WTHYM11(XSjTr({s#ngYIzUa z;MuG{=j9#2@P9{MZHJQ>&f{A?KK_c)&t&wx-ghvZmrKVlR4&f{eMZl59mf&wZ0L1-XVpjk-bbpPzh%Qm$E{XAIzA; ztDReH=x?#X|JerrcN_c}8~jBZe7_C8O}dHjlNGqu{`nlkxqaSc_$o%Pe@~0-a3jOD zUy=Ar89sy6V>iPW;+hiixeJHZ=Y9f_tT^wV{{_!A{kH{mD*c@{xSq!of6k|n>B;q> z-@B(|)ra@n8}MA~!}r&5KJ-68Q6fI;acFwFr$mqGTI0tNh(y?b;Lx~wZ6)H>Uo-q{ zTvH-GWJ67V9)U>2hsIWo6Gn-OH&R?B{~?@W$UFtBWfDREn`BaPcQpFVHuPJ7r{n)H zqu0-g|AP$ImxSB#c~leO%=!G5;k;a&Kc}D1==0Sz$^8z)IUmlS^Ld=nb3Pn*DH`I> zanAn{#)tE-VL0cn+ZsLQd>&);oX_11=X~@U8$EU@8q!nGSqLg%_%-U9PQ?trmf`x6 z_$<{oIJ?v}`E$0qrjw3830GU^#8XrJmHw}m1_iQ+;gmo!aGr3^ow0F@dUo~uEe*D&14aJ}xK_32a3wlew-M!%cklt(mJ zoZevg35@luDBF;M9NhSTLbb^jj2)hlI@W0H!m z=rJAocYewkPWotF^*&yrB~0`Bfrb7`Jfoz4hg0*{zlWpu-x7Yhpp<`1>Nt@ISFaq! z{Z59{GyS_R&oP|zTp*~?$0{c#TGCUGqvtc6=>5u`r3_E%pNra;H-%~z8b$apQJ(Ad zRa>yJrU;)J;0GBN;kO-(qJ{14h5(A>XEkYm%mCjjt87GN`646f^91pg;4zTX_t!;j zEu;%+>nXyIJ0uDFI=>YDd;IK2vbOf$DWoyXQHZ5kxY60CG=u*lD2>ps#>b)$zG!T? 
zHrTk_C|X>L&t{g@23tb`cpAQDNvcfzu*cu6GXK{;`_?6b=l`F6)os^4{x3f?|NB+% zF|^fzzBm1UL&>k>8&X!1_D~JJW@=RpjJ^}be=pHigC?nZ({x{VC3V$al~?q=Xr{=B z-aYlZuar!(w@$}jeIh2+9Efk6IKgkAZ^qIy9yk}1VE?6<4fA8nFwjr;q?MAADQ+7Y z0xX6c@HcqUi?APUw98oiLHsyN(b0ab1iuQ^M4xOMMfgA0f<^eBHO9a%@tM6;Ay^-% zqyGd2!$eyPeD!New<6G_ILMkx6`eK@kpF7XQqr*) z`?Ujs^9em(97 zD_{SgJ3K$l`>pw1z(^Ed8^NtnrR4woe7Z{{nv3#uCT@slDF<=gMaDv!rzXKvf2ZQm z?J=JoB3*!kM5*aFK_Y!d57H#1R{Y6mToF&Z{8oDFy$5*-8$uIpp41CmD_?8*X`LWl z{ydeR`154VKL-zm{Vei#cgtMGDl^%SY;CpQYGCQ)S1I`uRDON3%3p;C>E!41$Q-81 zNb=9ZVU@oL7|GZBk#H%}Y9;?08c=7VrKHc>)V0^pJ(CdvP-;Kj6|MxcX^Tx>hQ{^fC*INF58~OK!WtIghC&{NKZIypA z{2nY)>|dqidsTjYvdVWNe>(f;d{5?BrZSTJ0vuNPS8Mr-PM`is$^X#O0f0OyU8&_K zp@z|=%F{}f-`f5TDBwCyjB71lv;DrxpW^?Q z$c*~;+cg`j{nsLYI{P1r${ZV1!N~qaIJA7-|6dOKkSSU1qR3%pZ>HkY0U99r=OqBT-Xhr31O8<2&@~4wu z@O_!VRQX6gRf$#pDOmSSC%?2rX4JnkuGpl=&qMxn@>`YsK9!N=kH=w^|GJj1==5pR z6nUk(aSf-)r{8H1^5YcpqWD&M=IxhEYL)*H^3!_})y!lj{N1hOYkK`a-|O>!8~OJt z`PxsZ{A5y{Pvd`6<9u^#c9o6(2h!+I^KYGB>;LaI z`j;yCE-gr%t@p3n=)XYa*K#x)EnnZiY@@&HHc4SBLbCsK96G<|n>SR}zt#R-NZs{k>4cs(+J>{LMD@*ZFms>TTq2N+Z7_jr@CTDO{sNVc>d%Km=hyZ3td0Dcrp5lLXehvK2M*h6pC55R7N&YMx zI=_~m4S@Q8YyB51`2&@o7^bWL4NCqxCciR`e2POKtRTRr&Ea zDOvAJ0Ho8u_y;nZwiDT}3Wv_G@oNB(PQ{Wu{&$&@KhXT8UFDBwR2M%}`K`FM{A*Nx ztNrd(`R8GL;YsVSv2FmO{2EYaQsbom$DA*p_-1*%L|`~5fEq}vPt literal 0 HcmV?d00001 diff --git a/UserTools/template/MyToolMultiThread.cpp b/UserTools/template/MyToolMultiThread.cpp index b68a09f..d7a400c 100644 --- a/UserTools/template/MyToolMultiThread.cpp +++ b/UserTools/template/MyToolMultiThread.cpp @@ -10,20 +10,19 @@ MyToolMultiThread::MyToolMultiThread():Tool(){} bool MyToolMultiThread::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; - int threadcount=0; + unsigned int 
threadcount=0; if(!m_variables.Get("Threads",threadcount)) threadcount=4; - m_util=new Utilities(m_data->context); + m_util=new Utilities(); - for(int i=0;ibusy=0; tmparg->message=""; @@ -35,7 +34,7 @@ bool MyToolMultiThread::Initialise(std::string configfile, DataModel &data){ m_freethreads=threadcount; - + ExportConfiguration(); return true; } @@ -43,9 +42,9 @@ bool MyToolMultiThread::Initialise(std::string configfile, DataModel &data){ bool MyToolMultiThread::Execute(){ - for(int i=0; ibusy==0){ - std::cout<<"reply="<message<busy=1; break; @@ -54,13 +53,14 @@ bool MyToolMultiThread::Execute(){ } m_freethreads=0; - for(int i=0; ibusy==0) m_freethreads++; } - std::cout<<"free threads="<KillThread(args.at(i)); + for(unsigned int i=0;iKillThread(args.at(i)); args.clear(); diff --git a/UserTools/template/MyToolMultiThread.h b/UserTools/template/MyToolMultiThread.h index f8fa067..0c5e16d 100644 --- a/UserTools/template/MyToolMultiThread.h +++ b/UserTools/template/MyToolMultiThread.h @@ -5,6 +5,7 @@ #include #include "Tool.h" +#include "DataModel.h" /** * \struct MyToolMultiThread_args @@ -13,7 +14,6 @@ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ - * Contact: b.richards@qmul.ac.uk */ struct MyToolMultiThread_args:Thread_args{ @@ -32,7 +32,6 @@ struct MyToolMultiThread_args:Thread_args{ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ - * Contact: b.richards@qmul.ac.uk */ class MyToolMultiThread: public Tool { @@ -52,7 +51,7 @@ class MyToolMultiThread: public Tool { Utilities* m_util; ///< Pointer to utilities class to help with threading std::vector args; ///< Vector of thread args (also holds pointers to the threads) - int m_freethreads; ///< Keeps track of free threads + unsigned int m_freethreads; ///< Keeps track of free threads }; diff --git a/UserTools/template/MyToolMultiThread.o b/UserTools/template/MyToolMultiThread.o new file mode 100644 index 0000000000000000000000000000000000000000..eb6f3e275c35b5a3c3a976c8a8b204f808fc7792 GIT binary 
patch literal 50032 zcmdUY3w%`7wf9MeNEFP3iZwnNaMZ*{%phQbuY_chGcv)Hpim#3gfKvANYZ42pdg`1 zkm(TRR$6VvHf?G3-b-8VEk#t+pa|H9QnfAhNn5Op_@J$Fv1bzoSDt5-p}vb zKTgg$|GoBFYp=cb+K+S2u*4s%&d$oRbjq@>w32rkwXF8F$?I})S#I5E-5_XB$32xf z1IL*-&cZPY2VrAyoQ=ba;~X64;y4cnclU7^mCaIRF>RqE`=xmsP;b zbzO(^7u0n<&eL%4WXE#ow%(3@WcLi_+tHzRY)QG@Us-3DZHwgWp-e?%$8O8Dd!8zg zbd`HHl0jxiD?N7E>ye>1#EEp}4NuLUUF;s!OE;E#ly$8g{X{Z}zIT%#NzLzGGSTWf z;fYzG7P<9yY`P6)DmIc{c0cKIBJuL=7i^w50oUScYvn}ihNmi~S6o+dZN;=}WewiW zwPSbPOC@6u4bZ9RS$p$8UGCnmf}I`xAK#$woxPufUj1{k?C2i5>;retK~%$zjf@BT zFMJ0@tnOQ(((A!kXG0)Xoon|m@CbQ*gYLe`;G#fVfPvWjT)S_71XerFcD3&b#?B!Y zf&N9_TtV>2ak#bn$u+EC^i5TWv9)78Qs27=h_dKOd-FeBcG+9*Wp53!tSPa}3)8fC z8X1SwA}KxqYX)MslxGy3#<=Ck89ySM$5F2#UOQI4XG(07XCEq4+jr32I|@82x|ft% z?%o|PT)H2>rBZ8SN4IfHu`&E-k4x8q-bmT#H?*S>rk4K?|I*#dsI0bV#wz11#pC+Jt8lwe-a$#NxSSo z$8I}zb3;|EX|5gfr@F_5V5vY^7dy5%cUw&^1rAinu8Pjtq7-8#)7ZR$syNtm9mdP3 zi#WV2g>A|WdGgZ3$w>N8>}O=0M4RzSr?EZ%$k{$3tdiO@QuxtLF!rAVB>aN^6b*^K z7})%YOW2TV@u=v!!j{_Lsg_FCA{c%5$f_{s$ax;R6RcI8;2n4GY2aDWy_h<|-KY*a zL04KQP<7Y!Sj;=Y%?+^|(%tPbuu3Zx4Gkw(+g3A(Tp20J3zI#nXG`uzsAJu5Q^k!H zH&uM8BJ>pEVKDlh>WY5raf-3%$R+k=)XSlC-NAzgL2+=0`|*A8O5DuWAwC#AsLBmZ zFE;XHDt1s78;YB4cj$f=e+aj#`^zp7eb?^4g2ed-1ppCMD&Ut!eRR%1(op`fn=UHR6UI_s&GvsWrg3=npIn;5#pK+(UX5L7$uXxlCn z7V@^%>{I6I@974&+3xoS?QN9{B;(3f z*i>N0?h$TWcFzDPBRBeeyM52t{Vm?yggp^=eqqNRU5R`f7Kv7=am0@OjLKB>2fbN# z>~3Nci2lls?z4+_*qaYv#Ms>NlwI~aj6_i}lAz}hliI#_A~OQ9dHFUjYV257fo^Gd zg~#sqmjq+sd>ZS|6jrd6AJq2kj)3%yy}wfJki@(8Bv@?gAsLkHo-GB;t>99<0^ySB&Q?WJ$;L9jsX#4(d{v)m(U_DBU{7X)KXh^xI`5B0=ouXj7~ z$2niz?+Iq@iX4yXxO=yarnt1mTZfE5&&O`}9p{`TuCm;HO`n6~Ztz>n6`z>oL*xY@ z5B6{JHsCyI>kto|tk+wPo7z}wE}6WbHZ~hmp${T1lnI#G?IVz{ki>_T2&PL_H{&ZzkKI zps3_!i@XJi>E|X{^s-ID6%sI{1^d@{Ef^)x-&#_!v8@vv`qz8O^1E zMu2EWqMje_m^r%&@`vCC4S5_+VQS+8pdE?>*ic}w^;+*B&eGOpPrG|x`MhwGQqj^6 zA}bh;c=I2{Mfcv}h@-W!c?Hzx#@5EVFmqe%3H1DPsQZo&++enC@I!>o+P?knzH{;| zt8Z|@Z-UWxP$C~Cx}FWjz{xI?5+m2xv6~9fUp;ouN5{FBMX-pmVc)t(NIzw>t}zvx 
z=r&OFN?A@xLyWXSwr5}?*i-uK2cfqpJmvQ$*1OC|27H_^N9E4Ahne6D(xxLMAL!PzOU#Bb^MZ|p3?t3014VcO~x!c|QxX^#CiWQ$AR(t{e7U=l|`d^GI z=s(1p7A)3^U}48r30I{m?yyPEN<(IL=(qqm%NSWX7T&_vO1FLm+(03z7! z$)din0~I0{g`v?Cs=WE`q86`5)X9!PRSbg%X*MecU>b^MBgm{nz{KP;A1ko}_lRCE z&3UPCIY}a5y8tA2%nbIBPd^BZ68Ul+q}tI%-Whgu(-3ik{?<*IAf%u}$#yN4p*R@3 z*<;5l3w`_H$O!stMTjDS4Z*B~SSl)YD`ArxN`#W`Hq?SB=F&pIj^5~PN9Bd6KtBR2 znTF)j&0M-6JCHpFgl57Nw!275au@Y_+kw-$Tb)%`4Y6$cS8=b)pJpOX+7y_cfPOFw__WQv0>J2uVhfvX5c z@I4YjMK*$$@0q{5Z(r{n00bWygK=h!5bS%N)W8gs!a5063d8fHTkOkn7p)hpRP`2l zLH-wYa}-fgHfW%K{#;cy=q-Vf_Ce-sciU=d5;VK0d4EMg&!2P15Ov76`zsQS;x1Y( zn;VhXlL)!WvC#!=ZdN5pGIxF*##Wqfe=mI*$N5uilWBI5{9E;?_%-G38e zI2z?9PjUo!`xNQsChM08E$z(cg*@pcPCVBfzf# zIYxlDj(!AKD)i?gz#S)1O=tu-5m*0{IB@*!hi0+ z82)FN!#`>v1_&Mg=N?P=ms9i4B>Y#QAEk!>6A({i_}_uve%SCYqCUm|gu$bV{h(LP z|E~ppn*UO;Q_Ro>lfi!$g=QW3t5YL?HCjqWS9j5xV~G3>6!bCb)8qoZA5#K~{43HU zKV}|jL0=3N(N%)@*dza~!zc#ch&dGo<>j!!6D9bAEo(79qM6vs^ykl_AMO;>#+4wT zk#0Z`is{ye{(VtL3nw*ZuEMyzjTDZ01QxF;5qq^xj z2-0rc))LX1LjaTwy6<=m7dB?G%aKE-D#do={y@=oDJKTsBRt|Q4MzV+`PwBi4gVO{ zbz@>&UnTU#0G^m)r4Ht+j;%?jQIpb8Em}x4sct1XX3M6WCYD;FT?1;A7xRfDZqgo@ zFbfWpt1Mi$-MwrCj3rhr&Q{p!p9m{mYa>ARFE01{2Gt%g%^~t(SwSC&Z5V(x1D9=h z5GQwUC+*Efi|wARxq+z2qPZZ=CiDzkHG+)3Xcsx_hGA(^jTJyQ_~5|^W+61kmy>io z3&&JBIRTGN7L7o>Q>=mcL~=4Ndj_B31YFmm$Nb)o2EBDk7n*<9WBzT&YLTVq-_yJZ zz#rPt5q9)BMK~G#ty4_Fb z;MzDrFDB$Gg!>#OBFpKka22X5q|oe^sz@_qN_KxW<`(P4%o)?HL4rvQj2UuPj2SOU zrm4E9S?VUS+-j`Cv?>?dih6b&h%KYu7>IpyK(MRwwgbACu8YLelt9^w?w*hG=xK^~ z7ljw>C?@xFphvmXL+A8=(02@Fyex<*Iai2`MqItTh6rxp>78vm-Ws_1CskFpa6X~x)DKs||EqRx(uCx&!g zp0sC0bQuL^JBE!6ZF01rN3&Ao?CTQoe37F{)6H_U+ewmIkc4Oe<&FW5&u5 z-JZHH`HJqgw0x-pOFgiW2HhkLGVL+q9jjf+FzOM%LsU~@9TB89HXer z%A}!i19_q+zDIROj_Ai&fF=8^?4uMysS}fO9um2ZZcs>fcw;9H#9GVke(3^w%j-7m zpUDm%eN8`CK%6h6T`+zB2wDYUM7hQ`tQ(Z+uC+l{o_NHgwyQC4+iJBIIfIwg{Px}{ z`$48aBSJ+YJfK#ks2wG$X$jSog^J$)#!Mh()X&0*BS!rLVgNCY`f7I*yadn75TD$= zBf-s98=mgoA7KqjJ&D|$gy-Y@cXylI&UKGiBX_CjaqBmT5?e)rRdkgIZ7I=iC>Uj$ 
zPKarfoEu{3SgWQJm26N*I+EU?v5#tR0N#6}oFSs;(&SL?3z3!K_BYagfa)VEw*j>j zJrJ37$*z|yR?hpK-jCRUQo<)_>3O~6E;b*_ zNj2g|wIy#$%sa$GfI{43a;Q8$Q!8!~ot|yGqphj_+LiQ1&eAP=AXfC!)aQT|+p1~s zwc-!IOF@in3;s%Tw;dCN|r$ANgU zK*_Sw!6-t|o$vXCMWh*orAvg-byrBSw8+wy-ic9uWn!OAu67dqB$JeuBj*oB-_p~a z#Qqfc5p{fzyms01vaf z%~$xrC-6S8sU5#iOe=N$ItAVe(e2r{aEpQ_#b&{fo|gvQ|Lfq3xG z3dm8w18^om@iF2c^^KBv4SKME{!UPIXMDn5JqNb2vE2L9-Eu8Zhlg6itTdg!A0QsT z#lMa-&Brk+ua^FYX2^%F-6K}Z5r@J$1*j5gE<_&@>k(D~Zi|2MfbuIj=N9q?5Jbs6 zQ{-I?2PAntB2#u#@(l_l%qQwdNXEdZ_f69RW6_^T+^5O066*lnA1J=QF05wb$)ZwC zYKd16QscFNV}4Kk=V&#}fGm-psNoT_=zyn-Hyz3rc}w)@M{9#ug);F$Ap&nglzo)U z#d{Ajca120Q(-Xn<-%auUPLF`UHt;KsUstaX#i2jBPgTW`Ubm32aERl4%$mmZF&-0 zORSfEgof#Muec01(bqS>H!40sNF<&!!Sm15O%>0RB+sr?mVag}Ki!0T<7ca;qskyl0LfcDKcpysX1aWRzt6xsdt5I^t-|}odkk*r6_BP#TgSwSq1sSKW9yu7sA+Dv>E?-^P^i^Yc+1@8 zrn#QxPR}{Fb`^Q9yaKG6J1@Gjv-#F=VUZ)7m?6z<>}+ldbw)axTjvO_O>M2X>uQR4 zTDw|WJgsdJ&-})g=2?yTWF%@cY_1+P{h3ookR zX1}}lEO9dhH_y0xH-jz^TZn~75EHBG5Yd9zE-3(19_X*A3&g%CUK%?=Lp4}M%t@sq@#*!e)45X4C4{-do_M9 zH|{9|I?|ep>i#Pf7YcI2^=<7OR5=!nz@ja;CV(G&r@i(JU4!m zgKON5kr!uXdbyYLJUxx)=k4ej-uU|%9Jr!ysqT`QC;AmvZ;hCYnrrseunO@<@3VZ85JGucuPKR4GxPK=fl>C1ZWa zEk%U_2uOi`IqoK_6hK@Nb()@5Cu7HsoFzRi*{LI^qC-=ix=BS{FZgHIn20xw#0xdpO=}R56uZx$o{@Z^M(vkH zUr5<416%ZH8gAgnG>6)O(y}d}W{FbsaPMG7U!^63*kEC=;UpT3w$CwaFlEvod&R#( zk3PVKrv0fqR|zfLy?1Ht;(hu`OA$(^c#*n!TAbt86MyvpH(o1*ky`XBUsP}BtCxoYMe<=Sy|?J@I|xQ``lFjBAqAwFc-5geHTilWcJGgq^x|O$J)acr zBdVW#F)Wu_56fb{T`XTEBjY4@iVxYUa9yY8$BSSYXvCSfS>c^Y^=84r^|=NYjttXV72w)*qoi!)^Eq3SytHBKFJu zBzBdb#L8HA@V?Pmr@Wm7ir;7Fyqj&svvWSmwm!)D5ZRIT4)ZE$`?uZ~} z@KiB9fL9nEp@v0rCqB;Xc^gl5=U|0$DLoh|+9$tR+2QV`cki)^QP2H82}D1z-Toc; zEGBD*9sQg8>zji`f3vY7>H9Xsh-dN-9rm_(;RZjB5#~utY^-p)@P0(wa4%kJ#H$*$crR#60h`$A_jaQ4J5wrjlAiqU z-HOFPuc)Tns?zgLaE<)O-kmi6dK9}4l$Kx{#g2{?`>hr}D}}smcGgpI>P$rUu%N=V zUAEnJ*KD@?&v=?R@7>0CDugWhld?W6rS%~;&#>2xk51jazg2se1#*$7*MmzGj4(-S zM_NSEGO>ZSmgbe;*DE-^uA8V7NG#PN z<7pN5A>tKVU%GoM!3_)A_zA~jhpA(Un*F+aC!&C~LUO)~cgTR!B6fw~ zuf_vf$X!kR)$&R&48vdOi& 
z_qydezBaZ5ZhkNlL|qE$rJAz8>upzICH=e*+qkC&i~eqx?OlAby%F|bf&*{nJazkH z(VciB`=Q$C$KZu8b`Ex}50*h}q=uZ&!E=Pz8rI**f$3?%=wI!kKiEs@6QCtaPQnGL zx9E)M_P)WO+Xf>FNC=Ls-t+=_?%p?b-RZ3Buj_sb=I&S&+0k)cv5E5vXzbWoL43)! zcO>Es9)jmTj_ymLGje?(y0tcTJ9=lUr?%`9s*1b%4O&NjPCPBb_lBCyAi0tg++v2VjdIfO!F4Zvpj>YNPwgd-r$EB4(EfX0M^U2BSk{APi!; z4+Td1o)@+KThJYNQr34L7I04{KSixAdM~h)Oul5vEL@NRi<+vU2lhU#_G9{QL0GH8 zURPOrLByBn#yt1TzYc!RY8ZbaF2b3sP71ycBi`+wZgPtrjZ3cvsLhT zE`@{irai%;U8>+nS=?(qJiFRz9Y*lD-Ms3j>fu-ob^u*(D&>uIV7WnpT z4|IvXhCAP2pyzgcs_XVY6>wLrB#rP)|=}S6V&WQO|bN z^Xb~Mzd+pYR6Q?G)bsOd+9I23xN54=XsR=%AO92Xo|L9GT*^^(yXUq7E8zCOisNaj z`W%F6Xe$#wOTLJ%5@TD8oB+4)-emWmOqYTcU5GTrcgeK2wwCIS#(Cje+B$Cb`RJ3C z##y1pjyauvO_e-5>Bmv&40O80*s-+>iKMoxCDP1T>-@u=eV3+{KU|Ujukug7;gYg> zT;rO`%88!Bnkm;7d3?U&@x>*cu_a^2m0ap8^At`E&+^!f5tVV#*doiqcQPVn-8$9E zT9BW0`tYH-E74u^f#N$yk+8`x5@99kntpSJ61|Cs>5E7c=&zlZzbv~sFBg|oz5xfN zsq_bVJ=xilPRPrx9#)Z;OJ%zWN5a?97)+>aP?V*-)r29H)1NO;*;bKfRrJujs`46K_qg&(!QH_MMaSWwwJ9YrhYsZBugI>*D_G|8=XrXD zWUo0PuK;y~;Hy!8bt!+>XSpi(Ybq?tRVB-<%PXi7#iYI0E85G6R@k&HVY3}7Pjo5K z77IikwcXn)PxNTq>-NpMwJs=Hf9QS0@pSD2)Ye`?kZ#8@gf5bG3Wz#|5_PJ;=MIAB z45CBAS5mSJ5iWDBz|Jq((P>z6TAi$FM5ySux;1w zdOJH4FBGM=8?GGv0IrEwKl#)OS4Ey@*$}jLPYygO`}MrS>O2oxPkIp5r%IK3UzJPq zMSsEasUWUR9}X4PJ{-{1wG(4%tZ9Igr85Rn$jF zuSYW4xoTMUTgXW7J2k_yrz4{#ttJ8TgE|}ys%>_it^7dfP(wPvchdUl)p-NQxfY+A zQCJo6^x#;pc+&f*!XHtnXXvuyR^;>#xzlx5Hu?06B18UemW(Oai#~uXW4aFZ9%B79 zIS?{RmxV9&G9;IU-%-erT-IqBP|I>zryFqH6H~KY;+MfvF}iaJOJu@Fr-7&z_Dn?d z6?ydYPdd58uZ`(Dc_vM=87^X?B5@_6L24P7_?5C$jP6|G7t1o?=cj{${z3y@oQ6LE zB-={TQ>@X+WnHWIP&#|e1x^s!Bke=nT#7LKx;j&88u-Ac*o?1WhTb*3= zCV(SROw4BZc`CmJcoy?3YFC|H*5&###9sJ>`-CACmHZ611?6Nv^tptDfH`u3gr3iEDo0vZnJ} zaZPVB$cUAi)va*e4~c@FA`$(GbYkgi3a9a#Cwl%w^k;Dp*Q*3R+JRcuw-mj{k@Fpe zJ4>ulI6`+y`VWOWd+<7iCwrtUwn5?Ak2SMR3O`2!>ind@N2j&eHi4)2>t6|64GN6c zK7p$Nkm17wsTv>{PM0q0S_dL;U6x>$mghoG&6Hf=lqtF7h?SP*vRF`Hxh~7;B{I_` zegc3Is!NxZn+^hTS;rf2e2t$WxhxuO)04{@o(=+WrFpF+b6NNfEkkly?hGjLaeBEV zb6NQ0IYV;ERYO{i%Q`6yBfynd?WAVAEU~an1qHp1+Q}?>lW7ba$8}3L2|PKD>-Etc 
zz%$kAe&C~7eDf#~g9iF-2L5{mo{R!|WO75X&#`%no&j78d^C%bT5ixl-=gparJq=* z;d+I_yA@up&c6VjslLB4;J-BBr=g3I{)39ISm)vTGT@o`Pch)L40x*nUvI$arT$EE zPch(U8*rZizuJJ`Y{2g@;NLaiPXZsq=5J3cyS@sXM_WCE`9R>waaQB`2uYdvTp;js z6<5m=>sZ`X3%oFa>zPUq@J#l6!hpYRz&|nYpE#8COpa21rNJ%0$*%@bj*?yr-($cZ zQur=KPwQYx&jTNwot!7=$onqvO!fMM0Uw5fndnagPI@}!jt4%5&11zH8v6Xyz~>f>i}g0-ej7sz>Dhq#Q4(usoHqf_RId+#k74s_v1SJS5)4I|=$8Q> z!{*x!YAE(#h{{A?2Yd{hhl{l@_`5N5Wum_dxX>T}O5Zoo?=j$4V&ayG|GftM zWdnXDhRjTS78vk{4EQe$`0*HeGx5LJfJY2?p8@}+0sox=j~nnDD49vma}D@+4fy*8 zycAQ4O#HuYz<+AMHyiNh4EXB?d=v)OO!{1Ez^@@3W4|-qXrONfKANouN2!7<1fE5vHaf9GY$bL~3I(pCvfS6C!hYboY=RjsS_OSg z0vf5VmI>TW;05aHJHR~&lwE|bcRqp$j&E;iY;N^i$f}Q&hNjL8Mex3U05_M0LQM-6 z_qeYnt2G0m8=SNN1qQU*Cv7et&&rTzg}4M__XO_z?c#-d7T;54Yg$ zeqU!KP+v;WxR!8dXFy~(3dmoI*Z#vDjh$hC9ZFoHRS31V%?b;O;^xkd#^MrEF~bv$ z%CY|W#Z7H3vkdvZl0a*7q#3XKL&C9CYie0lW2CVb+O#M+HNk5pRRlw!&aRoEKq&1Q zOL0?sJCv!91WQAq=C)>tw~B+6CG^Kfe=eavAMJbkJf@1Jilh|XdyJf64k%&ytyM%INV*GI-TMHaS)(ekCU+CsN9&kDEt zn+~fM8p{eP`(0CAUGJY3npQC>=ntXfwCkMS>3El4xbpYN~f~Yeg zR5!W)l2D67=dMYOV*q`s{T=8~`11?KtvQ|sHNk+V+W zJpfUwR(1hUHb+Fs`pDRlPzYvgyCu}Qurm^#hbY(4*3q1(lFXZl=fzEpoe}bDlo_YX zz=WZ;+3JI++4CZ3r&~#G2CqqYYlb`&N$*$c!Lxyz?my`cQKdZh*g}WyQ`~2ht&VTC z4jFmK6{cN>CQcSfI7-@xq{g!eXy3+`u=5F_gyN23x6p!aP`#5BF{g6N*Oe5?Gp0}OfOp7I_5RDgqkB5 z{-If*4rJ)2et%W_HRxpX!t>^}%@5PLseK`IY`ujd74KLr;nq2kxw2!SgHSy_%~{N0 z!f@IQXHxnM2hd?=IG`}tzqGoUniKPuOoLw8wUK7Db#u7WS2DS|r9}-LQ|qSGhtdX4 z3Rj=W>~7p_xEZ46rJ_BX=BYvJGtmXXwKg;-98uvs)YaPjm9B88y{%I!D8`zEW}#W( zrjGEuaBGC-rAe6Pr6CM6qSt&zRvk-&606DF=GF+W4*E!PA}zjtP4+UHhl}=bRJXbH z@YT&Qz*oJDH**xEe z$z`ai1))AfI}aGeXSdPRDP2}#eUmODnN@F^%M#1Lp)ymoqGeuZb7sXdjF*CYMqF61 zy{jV}n%~?J>1u4zJ9l_YC=xxQC!hkMeqgyHRso{XmTSXIM-({!@1l84Ciul)Sk7|KBqC9 z%N@sXF1K9abh?1mi=NL@BK};Tg$!>+p0>|YhPN^NPKI;+?=#?A6iz1}lx1N(7|x7&k_Sp&*v#Q`G1e$oc~6JJN@uUM$hg4tiq{!V{vGE{*KXe`~R8Y zT>pIx=YE(&iX+j9>q-B8BPBX@( z$qpCb(Dtuo^xO`&Go1IwWen&2@ot85{Ra&AdWLg*KErTcuU!o1{NFL)!`1r$WY4d# z`g#;j{b&Zms|@th4fNfNzJu}UV|XXS?@~C~pWbEH_WvfMk1+gUg_E8Q4F5C3Z(+Dw 
zJ>RGL_A>k;g%h9o3}3_O?_&6VhW9gkcrI>`h(904M;h=84S1OW?`QZukfH7K=16%NhkQHImkAsWBpMBE?|{c0QvX1;cFQGqI}#SkzCG4{tu)QUyMAh&jv>SLx%r?;hawi-bbKB z{JH=9p5feo_A{LOPY<5|QzAaxe;#8v_n)iqe4Y}~ug0PEuU5Fze`*=Nh|%B4_;COE z2BYWxGr;fx#^;ogxIrR)cspKUz}GXJ`_H2aclwX~c2e@^{_}11>Zw!iPZ-YI>uCf2 z62pIlI%s>ItzKaz{+v$*!@2*gV>tJpEez*;MyVG-h(Gs-^BMjC)8`I`hZ%mS!pUw; z3?E?h4>Ei`!?`~D4fsg)VhHKU>)XR{Uf&-woY!j|!g4>0=Oj1O(2xc64i^_ZMgwI@hm|u=(+tH7|!{$D4gtnDGsfFgwb>RFJbhhjDEF&eysuD$oP~o zK2I6wUohY=GyEHj&-;x3M23IN=y|>JM}Y{5>~I+lZHEGdll?jUVutg6=kkCMiRf>{ zq51edxIuE_O&$RYoX3sdGn~hbpN~$`|2r-<|7RG^<-Tmd|7O6?9wYga-`>yoU(E1D z48M}$w=sN!!l}Oh!SGiY{xyc@osAnLC;d2vbNa^_{+}qP?eHSQxg9)S!9I`V|c4^#8?hPX8RkIsHC{bHDn? zfaet@_2=}b8t_Vnb9;W%fWNEoY={|$L;L>+j6TBfv(A_L5dGy0KU?8LfURZt`3Cxn z4fGQX^p_dvuQAY1Gtl2)pl>kH&oR)q80b3<^a~91OAPeA2KtzRex-r_CkFbT8R&N# z=;H?ZeFpju4D^%X8%hOG@CzNdD@$gKbKt*L^p`vEs}y~e1J}<>f)2b@(NA;W;=k{Oj2j*Jw-x;?2Y!*l zTO9a_WGJMF1OJY~7dh~sD7@E!t7Wpa!hw%g^s5~BIG2>W+JV!5A3|x310PiQS_iJ_ z*E{eR75yd$K3vJ&;=o^3_^%xJNODM|7ajPy3V+3cPf+-72dP2Ma=h|qNAbQ84mp4l|EhvzE$BDI`BU#e4GRShSKwL z2mZ05SJ`RLH`VZx76*=&eylW48lTMu{8a<4?MiC%xb&fczChXANndQhCm8T)2E5IH zFE!v%11`UhPmilQt~mAoje*Y}4EXy7{Bz2$o&3)?;A0K=WCQ+X1HQK)6Y}5lYWT-zr%p9HsB8#@K3A0wt@c*3MY5r z_5F&0zQ=&a4EP!Y{+I!O#(;A>@O8S!K!2XXsofXh(CtWjK9r~(`F_R!HvbACHQnD% zFyMN=LwYX5q4n4EN~a%IF+RLr4>SCSjE|l_I{EBo^gKR%WWe?Ol=yHx^*q$cf4U)V zH!}RkOwVS9bA9x@mH7XJ(RVU>&PV>!iPm#dF@iKVtapOwUIdzK-E~-s;E%`b9){z;G#oArc)n1zjo?BngB)5>s!olu1_VyIei_&IsImab343Z z!2id9Z;@(>dPQ-qJKhZ%&HsxA`uhwx{Wl7fNFUCph~Zp+`8#Ck`t$LHKR+75 z?8Eu+_dmZ5*;;?ngAx^^YmL*KkP=~JLyfD~1jIc2zZrfOt|<{8vZ1Cwn?NMuvmS@W z38O^SK3!a;{vn*^#`y})RcFF~uCDp%Q1lx)p}HpeO*k^~e}vKNe4^jLaD90wK96Z4 zoH-voH=}G`FV3IS-^BQ+$L7LrUt&1t!})VQPcVAUhvObaL-pb~=l>|FMK)HV5Yj=H83J+q)h z_;KP&{;}0H;X@fdi{X@~Q?I%pjE)9%KcLPU?^gHZtCTc8DDGfSQXGl!Vd6^una1!F z8NPzyc?{pkaP`WS$bFIFx{WA9{*GW$p8;hIHz|Tdryg-7|9BZrm%9}GMGV(#A1a~8 zBg!5@fJ%)HdhJgO7_MIV5heKh=bzV1aL!lrNh(H%{+%Y9;ioYARSefYN!hy?ek!AH zpn!*z;I9;IW;oSX+mwD^jS|sQeYLKSF`VdoReS7YcuM>%?p!!8(m2y9#>aE=JXc?J 
zgj*Vm@f8k!a%2{Mw!|u)+1Y6cpjdupnDJ*j@D;f#MpTZEol?Hs5#N^`1vz~HU)<3~ zx{$WMV*I!Xzs1j)q{#k^KBzt7AE8OrHp6#3nG9t3C52R8>@&3^|0*zpv}5BV+bQ7% z_-Tyho5L*&t>QUN_#|yXQ@A}6f~(<6FWr3NyTi}ohXIm)lu-9G)8&8Bd;hQf_&}-~ z{y*?T0jUaoMiJp~KNj%+hLWGg?*OoxbVeHSYWlVr|CdQeBf@}+*_pn!Oc@{i zs;mh3i0mkcAtn7g%(Oysh{(iWeX=RlJRIKyagv|$Q_jpB4?&*fnIZ5SDt^S2hx_%b zjM}CZO4OfXqr+I{U$e^0JG1J-2+qoiAKEEC)X&l2x6|g)k7-!N`0v-l#rRKJE`qn= zcci53@Z8XB`hRpV;B>UX13!)QD&}K+Ua5JOs|!7^{37mW;84@8#MxBdR^>;kGnK#c zD9Y=7j5ApItB#_)Uh{DI1UwIVe z*JUXG^`j`S*I8WuRYy^NyOPiShxRLuwEjC(c`l#!1&*}*D;eZ(Jc{zWGsu7VD9Z24 zAfNVAj#U2x8RXM`!;zNP|7Q`mKkdUDY5C!5eZ}oh`xHl7UcaZo%inMm<-Hl?-*Ob? zOEQ#iIEwO@XDC1aD9YOz%J*a_?*T@nd=CY;R)Z>Eej(i@Q-1PNT+y1B+N50ECI2k= zioDK0Q=Tb)(K=rHLxF_F|I9+7)bd@S__pMt#AV8bQY(Rxdc@OI-bwGg_n|Cd$I*hH zB`x2{*IEDRD3huDI;B7H=gC=qDl&!r9P*c|&9o9#nCwSmk<)(5fMt@uPsyL4%IlL; z{!(OQlD|>Ow^c!se-;j>{B^)c{#YD5om(vLF6JPviI$Q+)81yL@-tNVTLpFUkCv~? zzo5!H+i#aD&;NfQ;^M6Tv%oUd-@1#a(GQdZQ2oc?(DJqa(tBB?i&OqeWss+l$CFe3 zA{5Rfzkvozqr~C&D@*6IZ1%Iu|>yuOd_YLyv?v^F=z9z{(7l%{+uMP4` z)CTS4TCh4htLRe5LsU4EG)IW0jcFEl%SruPss*?*lXuYdnvvvJygD9UHD zf8DoaiIu7#)xQLX)BgWvkgxya1pWU2C^l*Re}zH*E+v0J8J6UejhynQLHSJe&o7q} z-6(sqkIGpmQVd5g>D>{AJRVA9l zsmg0RX%2E7SvOlrZ?sRgOCQ~%Wl{kI>X{`CLa zWYT|M2K}inPW`tUGE3s zbc6gEM_B)<2Kj{pQp1%>QL29(4yXJ@O1>lfSSywM<5YQlB3tnkHRyk@DzD|~GS2&6 z0GaH!aX^;Pb|U@jap>|I|2Y8CsYH?|{thVlhg(0<|C8jb6ES&Cl}|8K7f#w2RC%ZU zcBu0D_qsG6t-rp17J#U~2Gn`SfV|kK7*qXet*p!I`jb9R*;I0HGW|vRoIG8R(+nKW q^2Y;nj=fZ3@Fx0;ccxB~XZ_b%f7*M_RDSsnWQpBcpkm-G|9=4KF}vmf literal 0 HcmV?d00001 diff --git a/UserTools/template/MyToolServiceAdd.cpp b/UserTools/template/MyToolServiceAdd.cpp index 307fb64..35a5a4f 100644 --- a/UserTools/template/MyToolServiceAdd.cpp +++ b/UserTools/template/MyToolServiceAdd.cpp @@ -5,16 +5,15 @@ MyToolServiceAdd::MyToolServiceAdd():Tool(){} bool MyToolServiceAdd::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); - //m_variables.Print(); + InitialiseTool(data); + 
InitialiseConfiguration(configfile); - m_data= &data; - m_log= m_data->Log; + //m_variables.Print(); if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; if(!m_variables.Get("Port",m_port)) m_port=5555; - m_util=new Utilities(m_data->context); + m_util=new DAQUtilities(m_data->context); sock = new zmq::socket_t(*(m_data->context), ZMQ_DEALER); @@ -25,6 +24,8 @@ bool MyToolServiceAdd::Initialise(std::string configfile, DataModel &data){ if (!m_util->AddService("MyService",m_port,false)) return false; + ExportConfiguration(); + return true; } diff --git a/UserTools/template/MyToolServiceAdd.h b/UserTools/template/MyToolServiceAdd.h index cb09d7d..d65f51c 100644 --- a/UserTools/template/MyToolServiceAdd.h +++ b/UserTools/template/MyToolServiceAdd.h @@ -5,6 +5,7 @@ #include #include "Tool.h" +#include "DataModel.h" /** * \class MyToolServiceAdd @@ -13,7 +14,6 @@ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ class MyToolServiceAdd: public Tool { @@ -28,7 +28,7 @@ class MyToolServiceAdd: public Tool { private: - Utilities* m_util; ///< Pointer to utilities class to help with threading + DAQUtilities* m_util; ///< Pointer to utilities class to help with threading zmq::socket_t* sock; ///< zmq socket pointer for socket to advertise int m_port; ///< Port to advertise diff --git a/UserTools/template/MyToolServiceAdd.o b/UserTools/template/MyToolServiceAdd.o new file mode 100644 index 0000000000000000000000000000000000000000..1bfe2c95302e0533f0de0f304248880202598d20 GIT binary patch literal 39104 zcmdUY3w%`NnfD2kfryw1m)3Y?z^MjnLxPBjmv0luz?m}9EJ39z4j~LAH8+z91aE*z zkm(R*Tf4Qb-K8zPZEZ_!x68U(V?n^$-Lksfb+ssMwK9!Wdb3(DeEpJUNMSD8# z&%kjej_r`nm?^T77*9&X?-z%W%G2U(eU)1voe8>##mA#Cegvw)HuJ^N02IVw|J;x)J9q z^!1fEH|guY*5@TSH{;kv<9DyF3``%wg=N{h8rL_+d)KzN)-S!P*-pM`5563eEnVu82)wQ zbUSq$E{-NnQaQt;{nPE_s}9w^K~gcGz)S?%P49xY|B%78JWD6UT#W3(B6)nBvP4s`vX{_6Uxs5AL?v%TqMdmPNM1DDL4 zUs7a`+!xviv4P2pmPJ#Q(bVb^J9&MmCz?DQEjpAqsd4z%fuYObzi4ttXeS6GgKq@_ 
zcZMdYtD?a0GviU5!1uz|mLVm`PCgw?t_lsz-&-7nKZZiv!4ysPmqbT)ghp|#swbN2 zgv~=CRd1>AGJD@pDC^wOvb_$UA})_6k3@?O8v|(5Yk{HB657-eYKsiM4TF9GGjIii z+8u+&M@Jq~2E#6@jV2F8llO;q(Sp>iAqag3CRQjDl@-Y!*vaCMQ`0GjwLZBwTKkd? zlITchMR*9B2bBoN_Gs$6%5GzvO(_VwXwh#IC)vZJfuTvt&IIjz2X8+u%OoWrT z_IZ8#V0RxqLKVjH)yaE8dswoOtJ+W*G-qwRcO@!fFD}uHhLl|}+5)x@r(DC5yR)VZ zJ=;HFU$~8J*B<%gcARo`Y57gvy$K_?7(|du3*J@k#C^ltLGqpVS=J*phBlIX%tjEQ zG*r~-;>P(~$*c-4`0AC4-~ofmuVL1H6VhWt|}z3LSDt+DLLlldzatyK)fRMh5{cCk3v{ zam_?0AvS`%rSSbcqh}#N>tH2(#T$SsGr5=eG9~`O5=E{$=4kRIg{A&+J3w>CR+#N^ zpqRDE6it1JmlVH^=ajad?fS!ro+yAr$Y>;xh5^yq(ZHRDXh~|T>Pl=3HPug6zZRLb zUn#cHUrXX~S2}gVtr?{PZ*s*DD2z3%-RyAT&TdE78 z#=W7?*N|y^t%Cb^f!mjcnm7iUzG7rPqx_PY2N2uSLbm8S!b2}4Fed0R(5X`dM<_&I z<#ko}JDfOx0Xv~gk5H(&Hrm9xrp&rbcciME(S^Dt#r~{?D%`FQ)gU136aw0bz5ASq zM^y#w)b*hXGL0%tjLiFrd0nuLI2OyMVsvIBaMK=+iF!n@UtWJheQdWHuN#M72n^qZ zJPd2;&>KgO2BvR>BJ>WU>GF4u9_8>jT#tMXFM7a9-;<>?n9a_*`bl(yP8c9Md+g*s zMwD_F3PZWkmZzjgQ;E=k83*j-qij3qMt-A5usa#p$;=4G*$l?AqW)XReB7f~ zVCZ_dN12pff-5sGx-4X)|71Klc~8EDylounhu={pIg~!6jNXVlv^d~DbMUdGm>(XS zUu={ZGl1V}&!B7>&)fnfe@S_1M2RitgI?F@j7;UG!{kXmpZ@jRM~?y@+m4^Kebgl= z`53DLk$Ix$-I_o5tiT+}IX6_9P1$yiRy;uwW;1c6^EvZ$THOTYa&#xl9jSvam!vl6 z@po`f$!<1^?uVh6a15l&YEah8z6XXcl)Z~&7C)0a!Frz>y7b5 zEOE&ttsUKc@rqeiZ)@=K%Y#*xm1ym`WcKXyv6!i>oPBe=x4ZME56!Kts=Ro1dsk~m z|El4BYyzs!0dJ22c*V%NTHCV+bI$WAC=EGFDeiJV!3oyr+!P&b(#R&=S^ zW@~!Y?Bs4ti5q)?d~DL$SydrPoXT=u*Z=s~1Crw@N6L@bBigB@40bbhxTje46c{0P z>8jF|#Qs+I#@U`R`MfUPt$H|RqsMe$X{g5Om$f*c=$Dte`{jEw{qlQSk?u!;AD?f> zPDV|^$I3*^{v1>FS!AWxu|Z`W8W`FMKvAc^j4R7JdbG%2=YODx7sZn?IN@AU?ys=s zIoHLN{uMIM-a5|z>}2baasL0DY~5Gv|6ah_S^Rj>w*%JC$NP6pwsxHCADwKyJ;}du zvh~AL{KJ#2Uz8Hx7U1=L0pdpjY7j(hw-jCTO&@N4>GwZ1#roC+|K2IqPbc`_nqqx@ zqW>pl*7qj*ADvg3i20AcNINTRc@SSnMC1X8ohz|HEn4&-|eLqu>AVG;7OvTz+M| z|FLP-YZLrmpJsi&#Q)i8);lNpzc|hM_{6Edm};d?_K!`q?mOB4ooQBTlK;)A))SNb zz(3C7{5;@KO|$Np?0kITlHq~FFm#)oN=wr;siUy1Gz|gRoyK|kNsw=mX#UV8rKTV;yQD8%; zM4>x5d?P+fqN#!MMsjnokEF2v=Hm7@Fd&+WBj<&{(8r;=G1U>m9kOmgR%5CkH(2(a zt!}Qs&EtWgM?n`!tw*!M1m*D6h}qpfKcLMx 
z<924PGyH7-i&%m{UKsMEnLr^=nVB&1bW3&Q9ZcUVQKdNJ9t`uSZqGb56;b z0h#QGSoR;8#w;B-v=3+``P1}=aL!C$>CrJ}q&7hEA_$J<$lU<3i&W81*G1P8ESUPf z2!`ax)R3G3`^QvdVU<;9rYj+p! z`dbshuKtdWU{`k{xVELEeU(wH&tkmT^m3c(_>~#ln`u&=AzaLDjssw@N9;Pa$aE}4 zODxaq^t91%thKjOuv!`K$6l%4&BS781s6$b<+_cM#HIoFhMIIha~mGLx03H{0C6ja ziCZjos!?wMU8OdFYG^N3ax~Q00ICAQ4ImC!cB&o=-K~0y<(^_y0_c6(5op=Ik02l0 zHBrpOl9-8U8^RbQ?o-U?`_tr*i6Ws|1jk0tk?MfKoY$WyREx3Q0+PP_gK3`po2(lYTYm?8b{hBsIjKXL3YOsx?4I0v0VE2bcVT;`FVDRrGrq=)n zYm(qG2ez$dV)KdnSez<4`&cN7vyX)f?qhLFj(b?Bg6uYy+2+w(&9#REL)$p};wmRF z)TvnS32mnqRK+%&e;4T9p_Uwh$ve~zo0`a7Ps*g;)W8<>C^=ARW^J-}0&LLPnf00W zj@qKcr05~1s#V-Mkxrc=G#PJ%R1W|P2VC1w=y!V95KUH%Vr#K;t=2+OXD#HI!6svPosgC-SF5IZ8P%Ar-tX)>CY|1j8$$Ows+Kv% zLQT$Nnl139*~W1m(|iLGafPQ1=itgbrcp-nG0hIuc{opMQk*U)oke0awStr7GHz-W zz0LSPh(jN+Kfen$tqj;8qbrY{3{UEL&y*Q*FWY~Cmgi2*4>UH01&{BgNOx$!4 z^DFfsr_M4@DOj_N9Jw25Z`#9dT}(ODc#h!G&vsBDh!-VUCKe_6y|K&k@J9(fPFn9*NM1-TZImFdM1RK zeYJa>S{QyZakf49r=rA%ZN0&0r*xL_GFl(wYj5$XFvF>x^i7!Wao>^UWPfT(nf}w8 z$oQU9t}U2^|n2FwS>@+*Md+ZXU6WcI>;Wt#F4~o^1u~KZK zWP4P-Xh00tamyGjRyMdlqp`|XswXt6Ge{mZ{Y9qHraz*J_1GBJqmQMdywP4niTW?X zM3widHfx^`41Ev;Dil;W!2{_x;SS7;#+)Yw>9=vK2*xl~+C7HGxjQ`orgQ{9aF0r2 zzBBl8c{H^SeHdpr$+;o5X#FXu?9|!OR9DH^G>l`B)WWjwqaNA!C$5bo_co?(uZX0& zf{nFrHzwa|3@m(+)9NXRF2so$-S`w7Jvrw_+oswN!i)_#j@q>cZvV>g z=)e~tyc}~3P=C2Gd8BUWwfGsx!2WGC5cvV7 zhFDG?!G!k*+4V%@tXCtOsAR*2Rk)x5x3)GUUmtr+uRllDm2hEG60PkiPgI-mv{REp zFadp7g`H{(qMXrGI|xr>8BtOcGu;BI+dveqer;^f%*Vskqv<|m08HPAbNw~;$fA-+ z^1tL0ba>jX-5VIXk_{rY5?y6ig`NCeG&L@ox&oejxiR?)RQz7mz-M4{H2H7QT zoaQhFwoZqA5WVG8oY9|EKS>wo^qjCPtMT@`#{OyjXF+HupRsF~mnROO{&kSy;umVHQz!3{ZQy)dG&55R3E22rPU~i#!q4+Hjys|~K1o;A3{=%wP8`33aL;Pf3(qLk2XQ>cqOUzW{2iK-keM<0jOeW2*_$xBF_;d*F5J+%TPG%;W&r;X zH;=5qPbB%W>Yh2D9~rt>0Oh&9^f!om2CJ!8b95 zT15{GzKJQ+d|Y7)HGWID3a!~rE{C1!H5JYo_72u3cX`Hn>d zT`roEvp~#$(E=|>!2P!7o36$QkBG&2Vd@}ejMR26Dvc!jezC$4yBmG)F_r7 zFY#T8GEiv?%3*<6_SUIFp>1Txe7#ygIB%!5Vnd7TJ4_sdnRQ}b@ukhU*}n9$P)36;=&O!z9iJ+bRUIP$YG8P_n)#Rx 
zL6Hd{)c+_8!C*uN)Vm_msdc$jHkvq3SDMC(w}q=;z>106NA8y!#Cm5Z7+6o~b zaZ$b3@Og!^FJseAUw{!-y?4Oo%!;XgQmJ9_d8<}D4S1A}d#AD!p=Fr+ITeMDxQ)vM zQl@{emRt=89!loog07YCGkCopA>L=u4>S>FJ?U*+j!dt^v=$ru#ZZAhH2qJ1BDfKf z(IE3xU}H?afq^F!$!9VPuatIoDjZCA3psq4k$EDFF&?|Qs(k4OK!knZifHQQirf)v zDz~sBskK2xnB0efYesa|o7G1%!_0E>I>St0>tfs_pMUh#>FI@pz1C59V4eO9>Wg8l zE9g1i3VL)X=?aeISX7jttm%KHCrnHuSoJ!~2_;aQ2-*)iWo#x^k3UeO$zNoc(1JO` zVA9<*V;`;Dv>c_3ma)g-c(k48cWJTyY`ZT(~&i+uPk6OBBdA z#MSc}*HeV^&UCzf)hd(o|LJ+rf(4fZD;8aG)vRE3b>-a3>foHJITu#XsjdlDEQzlQ z+ARs4@u3T6S=KaN%%Yo@SVaS6MQ2Q!P_hl(LmBWhbsi>VDiN#F*HiVGgATeWm4f`W zOUpKoYc4IprQt8SxhbloPxve;&M-QdA0mQu9*$yOG?bP_Czh7fpKMukc}ooGK^!VS zT3WVsTzzTzW?y}2aIkn$Y2CQB6H9{)rRDXdW%Csy<=#prr1#@ce5G}R#hZOw(a~Er zj@NN8?Zg0W%`Yvxd))le^1FQVOM@fD^GhqX`s+)pHjl3_tr?uqP`bkRDPL&~kowXJ zP=FE~&;?dyBmBkm4gN$j_JwKRxUCb7Wgk8nW;;eY-zD6d$A?mE_BnjpO3UF2rG!5I zs;0e+I^g5MaotKk^Y-XG73RwS11gX8bx7yYRwjP?aIeNL#kKMcN~3+cs5E$O#$eU~ z>wCYZZ!`4SGDcJxBRMRp-rv$ZS^vWs+W$?Eb0#YS=`aj4HoQw6EGpgZ_x+cDVxd}C z2s2fk{z%JQLEZ0Fr%mG?g|vp!s)pP;MWGox<%|oYHI%7jMGC}6gA*T3;~p$%S;NWW zeua#Md4aR&+?lkdc>JX1```%Pw?kc2+fswLuF`z*GG{N%Gv%Ljq@tkXSkTAiPK>MNr{crYIX;LDmsYuny9X?nDUUgm%BOnUTzUsr?-b@`_^f$(m_uLV zf#bn?f#kDPubY#@SqbfAf6B!8H_3-@+T5uL`Qrr6`d~@`FB0)7#Gt%&ipKrpv*x=Y zb?>tlc;JO6Kt5}s2fgiqM?CPw9(bb%&M(^Xlh5MUZTZP(@oTsI2I2Z8vktMl+d z<=7c{_!fodx5p7p@9qgcuJMpdk~5~xu-q;6?-YH0d~*Ho%pP(DeNE#Na$)P68pm*u zlfI*I-Nk0|c4>TE25~MPR=E1f$@zB$>CRl_j49kL9rG6@*vt$C11^2m(p*5_`4H)a zk`FDTQ1U@iq2zN$n!GHZ<iGo^}qujIA?zO$!AT@2Z8vkDIU0)<>zMmGOGqRt`=-L;AN=VDVg5f%G3m3 zYnUmXI1D|1K`;!W-<J7rseMh{&_JTnV##{iatAU;Tnpm2whAe`%eZwQ_OP={dC~0#{&+# z)uzw29&)Y*KEuk6o5ug09&+vkK2ywpxXxnwo`?KDc;F@IA`6vkkq6%Cfj{7Z@Akka z!`?!A>pk#~dElS(z<=w3&qEhqNblty_@D>A-2*=ae5Pe;&$85774>p6hLA#fH+kUS z20qiu&R5ks6>@qootU1`qm_2fp0{-{pZHAdWG~oep}?{|fjFu}(QvGx|}e8DgDc${56jxt2N>Dtgmf zdUw3%=GgTf;j-1zfR;K2+kE@IbSk-ukLv#`AjFI)G;!)q&B zyE|4{plVLk#Fng#C3@rW2yQNp#aagjs;jH#$XB-`t<8z*>ejXv>{9o(v?ux^t>NYt z{Di~JiMc&3?Y)u3%@@V+5BJqovF3P3yfsnXmxwgi5SrT&@9T@G>=p%uYw#|3ytf6W 
zG=bwnV?wN}dsSRfRJQl^wp3Qp#QcIRm?KJ%pIu!Q>1t23J4HQ_LM^FnXi2m*!j=y0 zs728$=hsJLvA+J5u}I9l8DH6oZx3VFHYcJru~>U|JG5Js(FIlfspiidb)_z=ookhM z5#bvC)bi(I{>)>M#9R9lh@3q0 zP#0EA`{&Aq3!B4BV@vDjN5e5tF1v*Uey0tdFg&vhb9JUm>UmEfzz(CDz)~ z(GlzKiVyT4RO4t1S9Psx?^+dG67TEpj2CveXh4b}_aRU9oQC?pzADiUrS0*)>I;{| zJGX!D7!q#bL!smsbJ{4?A4cgoJt*F8AxwQK@ zqRXjR*m6R_$v|0pHF}1u#&}DoM?CAOn%B{dI*pqGosix?k6AnHqLtmldsTl&yk-qPcZSV3s?fgnisNSs&m}v9LAD2Wjw)~v?Ook@ zxt{f{_!uL(QGHQ!qPsV~z+qc@b$*-_4o1!3mZlzrR@a6HdhkbM7Ib&5ZeP>is~n`f zkQ*;GvNL1zZRzyx&vX>jw0HN#z!%rWS-Mn4;BiZ25Y{?oKyV@LTJL)h&E=S&#<~I# zT^-8}VeLDF|GZcXp~Qm6+Pm2N3tF&*L#Shy7txw6w;7hzJTZf7`9&JMi*ap9XQ$N}?`-W^f8wIhL$|N#3NKmQJeLN+wY9NWqOG@k9oqRC zdS)ZO&Zyj6IhVj~GGQZ|aZ7db<0)htuARAZpk1vz75R?Om9qC0g4w(i>N8 zN#Dg$dYxNniSUZ%W8?%LGSiw^&Uq|@HT$KQQl+}hpQ z)6$D2nbSkLjLV(gVvK51(~erSi9~z=f4L>>OtaPBaZ&Zz-O(3rSKvy_tP6NCS7db+ z-fL=Be-$!*LgoJG$^tr^`OBXj4m#?vXuvEp5szh-Fi^QPk~0CrV6IjeGFn$-eVxOD z4k0rt&gF8|Oqkrb)8EHd^>xSE(BeDd#u7Lk%S>o*b_b_^+e{hes>Ey-RtZ&VvEQz+#E@J#lpItGhWTfowUoR8TJC90=)aFUT8_D?KHw;N)>>q!6q;*wfz| zkF9O*P4u^PsLc)YZ+#HIPT;o+e3ihr2wc*CS>xP7nl0!b5O|fq z`6(3>>8o)Vd-x#)6YYuPF!)mhkRRYQw1*R&(S!=@hO6#uMqT~5qN{ZrJQCD{O<&Qmyq*-z@?nm1TN_( zvOq{YNqT+_&BW8)IE?)@0{^VQ`Mih8P2VQ)`;c$sbZeaTafar#G=YlEPd za<&S5y^!+>AxFl;zj)wJ2{}^!GlE{m&ntqyQ|Jxj`4AKBmvORE<1FtW4pZKz1U@YA z*95*<;OFS)GL$3f7Yba`?-uxWA?LpZF3bA|f!`_UFP(@RB)8rb0+)JI0+)8~6u6{+ zT;q&Sz8h=&^KXKFi@<;4LH~>g{m(q;f9XL#1<$vb-1eNRar!~pvsY0&^dvF%&(-4v zZNCvNPZmsD;FE2+mex3{esK#fzaPtP!#V-7LZO31^a6`Yyg|p2tUFpKN zE7Y>Cap4>Tm|`ycKOERv_H?`OH#B|1g*R(_y$c`GayGee^>rS2HoNd|Y5Kcd zxGAq5)>Qkw2K=TT&GREx+cJTB(VOQ)Zu&F9%S2qZJKfwBegpE1-m^)JMEV#GgP%hX ziTDZ}2G`-7C*Q=Kn?49$CbvB%9^LeudoU6IJPxB*eSM+vguokc&2&DFkKi!$iwPnT z{{jw!n|h}FFAChmJ8|i^UfeSgPvS6gOnnkxA@HrZW^&8fuF!n^yB_$H9=Lf`jpdU4 z|1=$U)H{g7*lF5<+s<VcQ~3hNDd;1_t{d_S*{{VP52^&a>p5By#aT*Im#SOWu0 zyFS-3C$C-9Xq@igm}uy)@StDefwy_!LmoKS2u$vF>Sh0V4tXB^B^r0z-{e8R(gXJz zkG$;voQIsRdEl~KH-g6aIp{%e%FF&-u2W2X(Qk5{@_$->y@#Dr?@gdF<-+S#D$#y9 
z?>bH6w7(CBq32!@6LC2oyI7$Pz5>^VewxN9pK}9)8+y0jIG13e{9AAs`fUV}NH6yX zKBZ6xm*f5^qF$P4BkzGamR8LcZJ|FyEC>j*OqPL_a3=&eAwf{OZ7z zw_4C|6!_(WexAS^1Ww-?IjcP6Bs_4rZzlDA%7gyz1-?n>{f@?YVp}lw{EMKM@}JPS z+i!=2oLV904<2&(UNsZz_YNFJ@6QD;$LqAl-Ss|&0+8JJ`5Na*%BdFgvL9F^=w(0f zB|$IamhVq9@id6T*#9NH??ik^;Qu6WY0u*x_(2c+_VEykMES!wjNTmr-z@Ma1TN`+ zBXCK7-UKisQNF}i3H(mv8NFtIiPbz;;D00Nw+Q@Rjl1bT=RyAyfy?zvTHw;3zY+Kf zq4y1e%k{zydSS`fP&sAMn6eLM{{Kr*Ig5eup5E8=orrF|N@J zJ=cs(Zu-wUa2`J1L(VLLOM9H}tn%f*BIq|km$7FuD;CLZ5C0N~!9RuzL(ewI#R8s|xln>Pr0Id1;k1HVA@ zV;_es<4>>W0)H*&W&4uP3nYG%p#OxBKcI1Uc}@T9#`g$0QvMS{j;zP$1-%?E`2V0W zQNFCNi0E$zk!Sp{Q{ck_|B1ji3*0CAF-d=}z$N`^flK-!flK;-5V-WiKYHNL30$_v zX}aHI^N{1hX`(;eBFYu?p#OjeeS-&m#Dm`S=d2f5k10Vf>+uDRyX*0FK`-lZ%1IE6 z#AYD@-R7x)T+cL`j|9~QVA zH}4ks-wHXO61bGFy9c$O&ue4nBwRBQ`w|X=PbG*%|9=^W!Otd$#JKtj4uhXV5Q%cW zio@WPI zAaH4?#3lW=1%0``re28$J#Z=i3z~)}Dc{UJh)em`63{8A&(wRpzUE2F-zjh@->ey# zE#)s2^g(@1`6~rpA@DYVR|&jb;O3HYTFed3L4D15<6#o}G$vwxb>;ln`kMH7fp-gh zxxj}6uDeo&a8ANRImTDK-@zXw;yjGM7?VuIOq=5UPx*sH{A6|I{Jf~I4ZUu*)>{HE z74$Y8gCuqGextxA6VT~?flo0Y&gO64P>yNi%zi=8Pa~ky>jIZwK^IW~lCj_DcD|o? 
zaONFQRCw3LMSqsY`Mop~PxR0Gd4m)eIQ{tngJh#L?SKAdjW5d8U;@J8?_ws zJ_+e_`p?R~^__{9l~yI*_IA!~=BhW|(Nf7b?5xUF_$tY&T-n!W;ZqllR^nwp`Hq%) z#rj>c`H@0ycEPV{D%Z5)eP#8XP7DTh#Hpt+Wp6j_qUF_<_{2%x;@hUIq5WMQ?Kj3d z)?0b9B^A{Czy3{{C=;yBe|xP!6%-)KuL$wV=CQvKbH$=>A^TStuQ{?N~X0lg&N26ox3U%JhaC!0oC>&}zo zCUQDj+LPa(c*rYKmHcl-1cAMkeaODC%m z|JhW$5`S{?L#XlA8@0dUZL!t-E1ejWdb<$?f0;Ch$)}k^Nr`p(#>~l<;hxV3^>iU~ z_To2lbu&*T|MkbgzY4TWW-dhjOK}{l{*9nzlJ;MQ`(x$5M{#H8)YMO(A1nX%0{oXB z2mk#A_}3o?|E>c3A3F~IJq7r=HaXVvoB22AluY#BT{w=F|EFX^GHW36b6tL{{4bCh zN$Tf%`B?dnDDGVStdnEqe_iuS{a-i^{$f23m*waFz_IF|r1_=(oyWm%-v5#OYmS3I zR6zeV$H8A!fPdX_@LyKIe>WWme?tNNT)(^h9|T5Hx#q{M#lKI;^l`cP#(kMWvp-w| zQf6HiQdjq04?@Z*ZRzMm%kdAD!+@r?FL6!m2*=42iMW=^3zR)^p~CDFb17d=GU#i z2|0!I->dc8nv?$LSmf5909HtUmDYcr<~Jv|e(nzy(toIcey+pa`uS{y`p3Eaw@mAI zkL}bY(?hr+{~QV8x?S^Ex^U7mne!9++RcBjhyRWJhW~lZ?`}W4H2*t>kV=tU#k8IQ zru?UJvp*Fv=i2PUu>Q`$Vf35!^9nGkaLeW2?O`(Sl{$BhE_2PZAI#);$A6P{ym?+{ zc--|*=0f%VdW*xcLN`p>e=ZKU{rr2Ch4fo`)9f2Yu|B)?f73(%vbe)As`;qD0*71w z(;oVF9;5!JJoJ~Zb`(D?^q*fq|Jxq=SLh8r&iQ0=m!JELh5Ua+>tCUnS$@-35-Qht z7=Z&3dG)_t>kn%>b8_pi1Aig?O&n~I2Aq@X|JWAY{vYwse?;py^jb__|GULQ|9-9C zl#}JJDxm-0JoHNKVwcNqu=niYkqh8->CV`-w8A_+~r>d{zCO%*XD5a_?fGklKfiA*qaW4!)^CHED%|BiFvDRqe@D-`%OC&w-#Qi2 z|GL&csu`)D>rc1-DezAr{dFOS(fqwWEhex0r+~kZ{#{)T$6?`r_9brpYmI)RK%dL> z!1-R|Ki^|e`d$3nIk7+*a89cI|1Vs-^(Vn!3LMp#lWMenLvJ$8y*Uqg=r8YgILb96 z{a=T}@EQD*y8Q0>N1fKMn}L>+U;htlejnPgOv^OCyS*6whIW^+-^E|0?Ps6GMEe)u zF#N{;7d`B6)B5!^fFe{f{N~<|i3{V;ZU2DgH+l@co1Y7ZLh-jk+iz<|+P@Ho;Wu(W zp!K``zfJ3ZulYCMPbp;oSC7&D1`qqcTEPBD0sDJB^p9%&HM;!fWXf;u`S)52`R|C< zzd{qze~WP#e&fHt_t5{k)?cCd&B^ea`_Fjj$De;wX@w@F{ze>z-{{}tp+7j=VXV>o z=4ANI{UaXwcWV9HG$Hk0iNo+4{fEI%%pJeWw0_2hOz!*H6AH(V;Wzq>4EO!{;4f5v zb?Y1sV<+ujg2V6|{5q}Qv~#0pg|=Ux46I_EH>MnXztvo4h;VVYn^9-FY1eChSNCIW z)BGD*IFXF~21=On8$h3zneyw7o#nq8hv7Hnr#)`n6j3&pzxe-nbO*|JHkRUW^Yi(? 
kd+Y#YjW+Ptvi?K!J9qiM;xeZYf6vVh$H!1UncV*S-!%&;E&u=k literal 0 HcmV?d00001 diff --git a/UserTools/template/MyToolThread.cpp b/UserTools/template/MyToolThread.cpp index b90a7e1..2e8cec8 100644 --- a/UserTools/template/MyToolThread.cpp +++ b/UserTools/template/MyToolThread.cpp @@ -10,18 +10,19 @@ MyToolThread::MyToolThread():Tool(){} bool MyToolThread::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; - m_util=new Utilities(m_data->context); + m_util=new Utilities(); args=new MyToolThread_args(); m_util->CreateThread("test", &Thread, args); + + ExportConfiguration(); return true; } diff --git a/UserTools/template/MyToolThread.h b/UserTools/template/MyToolThread.h index 9faeb1c..af9439e 100644 --- a/UserTools/template/MyToolThread.h +++ b/UserTools/template/MyToolThread.h @@ -5,6 +5,7 @@ #include #include "Tool.h" +#include "DataModel.h" /** * \struct MyToolThread_args_args @@ -13,7 +14,6 @@ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ - * Contact: b.richards@qmul.ac.uk */ struct MyToolThread_args:Thread_args{ @@ -30,7 +30,6 @@ struct MyToolThread_args:Thread_args{ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ class MyToolThread: public Tool { diff --git a/UserTools/template/MyToolThread.o b/UserTools/template/MyToolThread.o new file mode 100644 index 0000000000000000000000000000000000000000..49c4764e0feff1f8f26bcf2acbaeb4d0d1b69a91 GIT binary patch literal 37352 zcmdUY3t&{$o%c;9kqDYetdFQb1{^h5O*6XvSG+udqQx7J!~Yg8Um+e+=ru3FNrSf;Upts+)2-|v4OGiUDHg8}`% z{r2dcd+zW2U+0|vdHv^3SRV{8$jrzvWXdo;Z6u#0Y8XxHlKFC$EjO+<7Bkv2@q8Ap zvvEzvMf_86O~qA+%ZF>ONvT<79C4;Nt<;<`xWi*f&y$d}+A5c!$7Uo7%v z;(iJ4mx}yl;(j^qpBDMghj-9Geuj 
zcEyPb^iKZ8rgdaLm6_IoWsJxnf2gX!+*OswNvd)J!;o4kwC=1bFu2AEkn1f(WkpFv z-C}d7otaZ>TJM=d>x*F4i>B2+0%=U)&Z>h3*F4nMO9t$k-@~=c?~w9Yfl%T3{IZH` zp9xvpMOAovR4j%nrlRll0@Lohha}lgZl>E%%%5XgmHr&A#{L#79QbRj$h3nfK5guS zu`$%!w99kMvOV5`uYyZ92D^vtZ!t-`m|U{18th9$c59xAOqFSO7lf??LjEm2b0}C6 zwxfAw*{;}>6iL+suf{;SZ}exPodg%~?)?c7yZT5SC9`jP0qc9Jb;Yu0WdD!tiLV+P z8`~_OE0(M@y=_ldT+JE+oe3}KKzsb%-0*r&CVas3UNU`NPKGzXYQLz`#bNt~9CN7O zzm4=z`Sb{@97t1Bbl*U&|%nA^!*pLVZWP-rEW$v0R3C;L#j-yZ6x( zHVGj%gb?jFK|60}RuLMv-@k_>+PAPSC<;@PLCr8gwEj|aSHJ&RiS{F`HevfAR+FT) zN+X`1X6@G#jW`WC(TGkfO?#c+2UD4yDR!k;#e2;hn$0Y?c9D%GjC^3&JFp>#S7(?E zzI2_xNY(~b+$yjffuWyD&7u4Jn}LUiT1y^p@1lme&)*9GT}AlnXy@{k))xO3`g$5?D;wY>&*toMcbK7bwf zgHYH(gJ9t2OaR$&1RPYW?q4tyVxOwEyTRB^j8K0cTrW-icer}%6)1eN9v{>+^~}1E zwIhsv=mlsFmHWt7<4G;?6Mdy-jJhXx-YJx|hFWZGeWUfB9u*m{Fi#Ow>!WRSkZEP{q9N57aUv&jV* zq^YyX3+OC#9W1X$PB)uSB~YE1-$!tPoLwK-&#i-m*c#NDP9_Sewz z#OA*Ad;$OZNd7kFp%`)L~>Cnix{g7XOO z8*ZY!PmoaMXcjJE&ulK?t}6JD)gZ^?MbiDqu>c^%R-i z6wZjpPJ~-|`_IdvK5Oy!@HxC2J;9yfQsl^;nyBaVypI}~#7V8Nf_%utV_-L2;R56D z-dpSZHQ}L${G>VX0@;Jr)WmQilY|KcC}U2r2TlNb*0OM5I1S6vq(4k%e#k6o4s|z$ zE#8U3o=VLAz0icdxfb3AL98jz{1tusveTOWKG{1>R8&v(g3i3e%*-5;JiVT`Y(J}) zFp~9qogV}19U6rg+vEgWxWl2zFiz?)P9T`CCsaC^Mfp@0l^k4|;;B+6*_CLRUTT=K zy~4yG&CU%df3gHPC!$$n2g`=7hc^R$vfRCl?Z52FSd8OX+)Vq&L~Pop;ghh{ZP~D$c0J0@sp7hdh_Zji{>?k^n((olc-1>& zV{Ba#bN3&Z6k0uR#ae{z+`B)7o?f%;S?^E>AwR?lwI0_PL3b1bir3)-n>xN9HLbsx zR#HkS@jy!aQ^B@e{Mh7qU?MyBuk)8C+Zp>gpWz>9k_Y zh>b^l9~?G0pM-3jTFch?O?eQo4q&g#dN_U_S_OL#oZW6J3YWbbw*Fyy7yJ_2!`KvJ z3)!oo6YCIn8d{MChOJ%Q(@pEyz*u<3Ve@i6gY1Sz^Kvu>CXmOFw{|^spfLVDLWc+z zllpk_*fq(K7GL?kLjYVaFTMorLr$0zzc-QFLX^PF_@C*-4ki8Zmm9W2H|J87`FdVP| zPA!NUW&Dbtly7*Be5h5yyP7ygId$R zr5sI#{oOJqRx}rl9XhL(?eg}Y1u`y1ZbiV{e;P4cuf!+AKsFDBt>>k(?l0l|3u_m^ z+UpdtbSehQ4E1sQG1Yw+(2(_V{36^F`!j0M zQ7W+4Lvj@aM^fbCAU;&Zs(zM`$R(RF^|X++mo0T+D*<+ok{9Nfc2Bu!FQxNHP14LT z%*5#_DRXj~%ri{uTFe#8FjuHjnhFHN&pJ1%2|DHf9#orUo@`s3t&J?ZbSkz41GTPt(o=P?;b z%aiu|;ug}$SORzyqDftWQEKT`89A8*Y;*Gsy~_Id{nVrGid 
z$GEhQ_R456HJ7*lPB1dv@d25WO((pNpOQyy3CI5gLb=++Z^R;zB5|M|#c}#D#>O(T zinHt4Js&QFXLB$!`=xy2$;|BE<{S5AWk2dQwq@?2c+mnLNo6dK>p&AzM97|zXp7^S({-!C*i$OEgDpZ(XV##hL=2lKPv zo@yM-&;EI#ar-F%hEC6Zu+Z3XdiE<*jr-34@O(k`ZwigCoJkt?owXS77ba)lUzo9D zN6z7|R>Fnj0fsvCihURhL{6sg5+Zx8bR~G3IOQZfWtgwa0v`8(NxIFlrn#llz?w zuQ#!c3-n`yg7YFf17Qb*UDJK++YO31vS17&p_QZ{5qWGPk3`gBD{+emq=`9=p2HS` zvJ?dFB@a=VE^*2t*hGl}f~H-~BXIU&7N?{braY|iIAsZtLik7w9N}bv`i_7K0ZW?#mHcQ!6~OpKz%m0& zO3V@%7Gxf?G!dF3IV^TrLoDTdl+$E@=oB*|8M`DcXgC(Tq*;J;rI3h_rifkUq2FbN zsd0VmnZ+(Ug!Q6pDAtF%7J-1GfueJ07Gna4m;|tC4MwsvZwW~QlZ;|AiH|cD`IXCF zp#@uz@X$pN0!QKB#@J2e;h~^bK0i*~8nzG-Mlfx= zkh&Q2Wj8!bOz(2~#a9RTR(A?xNqqUkEg8LAe7_-PMsBUOm&Hk&Br!ZQEgVV*R@R*8 zuwmFj6@LtOO62_8a5t@o*d1;?OgP)!Pp7~Qm}v}F({?|Os)Wmiy|;}ZW7>oM9FzoJ z2pmEegl0S|>L$NAe1d(qWR~ss_HU-s1Fb3E{#M4i#lMxbzy~(t{&#Qt1I#3E{sU~V zu!G38M2YA-w{MGom^c*McTRu}qPXmUJm_$bO9pLOS2Mp8p~qCegS#{({0tWMq{^W) zL-!vdQz@rBTPk$Q7z<(9j9h zITWheOrjA^CC0EVn$0d>%8kW0nqg~yIAesqUczx}6WTItJ;2&B)b~EVUV0c3k;9iX z=OBj|T#(}N%wX$9A|(?LiT9xbv<*>Uh=y>PBV5 zcAD0eEi>DX!nq#e>V&s>^B>|)CaOde8Mo{}{63r(#5fCC!|?+4*biH21ib1UScl z9Ooatf>R6xRbI4a>lTo{+jJ1}hhNq&b%p23PBY!3;W4q^O!NfAEQ3yAtm zHT(XN5xc^a-~7oU*~<8C_#SZY%sK;R@ZO2vi1P}?m8qUI$Yd?!|9B4tkMeqL!X{d% z5wb@AQy-Jr8?T^^l64^dI?P0DW%NwbdS>(zsnaYw;2n4uhG3(wobIKSsxKsFRB;G^ z50{G(s}8u2Fj&=5$|oH%sh&)84%+zVShaSHBA&7%9u}KH)-PoTx@Y5{MKz@Up}lJV zCqckQ0&^#{K>R&42NsV}KE=k5;E@rGZm0Ia;Cv?D1EzQgACSV5vft@@9UE~BFB7{H zL}n0i`3(jNjq3u;PB4lFI8E^D1&l?W!w3lho+048M40DAba=JBibr<7gad z7CRS;HT)R345-L3OCy&f#~aRs*CD7fdb>Qk;*{tec#bj3Fo%qPc>8}O6(yEsigi3n z)QEMQjiybD%lxa1z{t4FCBlHj(K9wXgdowo=rAQd7i7@u723gOT_O0kth z$U-t}!xpbP<0~nlnGa#liQkdV4j*;)tJI!|h{afRryyR+*b{I!1x-qo6#$(lQ_6TK z2-|D0rqhn!n&oFf$d^HA+Ec@JTh8dI*w=;Z1$jS#`_f)0c1_6IS#946?`-o`mmR6L z-mms9*hgWKQ!v|N-jusNyjW3)m6v_DNP+UQXK(q0`8axGJuZB;xaZby;O&>MLwEu9 z+@SuqYU^Nm|Do;`B<@6)u$9GMGYH#Rei$$<@WQ}w*lYeR>>V-|AF7^lKzy^eeuel}Z(Sq4(Hq?%;)z3RFsx}iTh>t!3&`o(v`_NG1dMF#TAO^R zXV`8A;h9uN)D+c>H^B7GAPNQ!jrPg-N-!`S??M5<_zk#MEHQ_wazfVcl5eb0WElaM 
zo!76*K=C8`9uPYosqxl3Mt`vOUJ#nrf0<>=3SxWE|4CI?f9)nTXAd=J zb=kglRw^&{3uViEv5~ZTcA%aesOOIAve%%F-Z7$j zUY4lm>8#-niWWafbk#I;)fDc>AHdy{+C(6+991{_ZZ0rF-ry^^c2LzUF^BGEZr;Ys=G+`)i9SXcl1jgcL&hm|uORv`qc`#RfGFFB z^Q7V=m3wr6r>e`r%N6p+~^s^e$%MI)q{)D{v09m=xtt=yEWVM zyX=W>t-gSCQXkS8ky7?}`mn?EdZwE+@R#CxM96xF1j4o<@`piBMXqmS7J9ueyDGOl z^ZALnMGJC$=tkcEp-Ie+gMzo7+WP4As?4p)djVS3c1B~0F% z$%RBA^cZa6P!OK=7-ytm1bEmClV0R8&P)dx{n=?ab$=%G){7E0?})sI{TfC*h#S>4 zQ^DC>l%Ow6B~IYeH266h{9FyrS~{)FW5}5zS@ct6GkMQ2kjA)79s}`xm*g=%>4H)_ z)31EelgFT6{G=yOVs%I@^B6Nz0rBK9Xhu#=`|wEiOS{PrW%3v-6f7UvokBpF3NBl! z33!I$CyE%q`?rmjTkT>=-=UIPCL!`VjyqYOKn!sDFj*Njvw2#!LJag_t%kIL6Rx3{BF|ysrAw}-!Qj0z8XD6u`J$Vdl1YDBGaJDih@)#3cs7Vg3GU>@;1mfYl(6lm-@rg8y08e5Uoh;6Wi=3jYYqG^J z5_p~#Re43FQ?>e8U;M3VJV7Uq@{}G0to6swJ;!WVxZoQ&JuFxl8C{8ny^Q}*m z`+>XBe@5Wj1Rd?~DJ>>BnaO!rj?5c@yVdIsqBoNBxoqcaHS}N8;M;(colbjp0-vtz z6X?u9>1~aiSr`azc2;TdjT-zR4gRJE&%{FErng3e_iFIn8vJDq9zc7z>Fw6w>ooY& z8hoz?FT~V5UD>Cxa~JyMcfi@W784IULjkAXqq*4`z*Ih6+1Igi6X>4t!)*WjmM32~ECpuw-v;NREa?`iN8utd4( zJzs-w)!_fo;3uJ-+~l97!7tF@^ECK1go{4mJ;Fa%Y3O6Xrz!hqc_O--;mQ58wE1z3 zoDoJZXIUe00(_6*$$hnyb2>89jO4!hRAFo}!^;z<7l_;f;F32{h)g@9pO=83j-@X! 
ze0~C#a_$1|OQ1?7a?O=4G0Do3;OgR*_Qr-5yc^ZfWq_(SRvKBfJQC}SMnia*8;Lab z^aKKdnd&Q6p~l)+Akf&<&R{ zjU62@p*9vSjYOK;o1xh#4$q%UA9LsW$UOdm6u(MO3zSI!QuPOq3mmHwHn z_RsR}mHIA{%&E;dH`o(x?2bj<)Xm{x>|l*E4DJYZE-Xm0a1aY(ZK6jo2CcpE18MzV?|1K;gSMVqWD5qdd0+- z2Wg&MDK(^u5yFqJuX%l7!*Q`wtJhtP0h#o!XhW;UyNDQS=CJuBDl2nbdOsSo?2Yt} zqPc=q$z?9oc*K0yPGN#7mlStKR~APit9p7O9nsFN_BKp4v9*!ae40vi!P&L3_Rgpf zS9f*l=rbaV+txIrUuNg_E@88-uh-Lwn6A3`*XrdPI9xOiqD zlIn`0?cm09B9V4p@kn!9SF|(cYQZ8}fw+-^I)sU;1Pj-kNX^39iz6-QAJ8NgN(gK0 zjJ8JGVzhWAVOqRc<57Pq17VzNDHhJ^NN2R8rJ*qzY;CPs*x1SiB|0TBU8Q;jceRUa zqp{;!^-#^@Gq{XiyoOt06zo>?;uI5#3!9@#eXOTf@+OkL_*3{X) zCeqTr3h#SIJ3HGu(JT%X$K>b9sV32HA9>&NA?sw?o@ieBo(Rx~?1?}@^-i5fN*6TK z9E9zT+e%rsIM$2_syW&fC|TItBEFptE~>e*Hj=)S{}1g1XU>8bt%}CP2#s{NHQ(4B zjdZkk1sB!MjWDljOX9RwHU3?5%+nF>hlnOO9x5d<2)y8Hf_q*(O3z6m+ z_QF^$*v^;LI<2Qa&b=-3BiAXU>Nuy6MPd$dYzO(o6ezg@OJz+%CuZQ|nxM$ug~vXh z&TVXO?P%!45ra(`ZEI*cMeCO(P8)nth%{g!?u;(Ssi>_nx?D4-*Oo*U)v}4wy^?`i z_G=XS>^SaPDwRwwboh;nW(&s8U~}PbU2EGKgEftqg=y(&?!@3}cIoEijG)Se-uA1aTyEu}r$kM?Aq9bbf7!ERU{gZi_TF#2T9f(ivqdiFk^l z$3-bS0?S>`EK)C<1`}cm&X%rjEIuM{Ec?;IYNNjCK6tm>Yc3KG1btR4&={k}{weCCNx{wA>wPhyk>S}g3Cv{;W z3qwwbbxlWiXEd_9xii+?(2|H~Ux3iXRk-NoG)hqs%0L*~s8jGS3!La16#RP%zC^+I z3Y_GSOez0efs>q-3cf(ZBZ;2gkdySC3a;97lY-YM^#7*dH!JuP3a-*`*WfQ}@VExA z7O^t2zemyAg6|Q6uTpST zPNN2&gE&4VDqfFE+Vg!4{%Zx_sL(efE>DT%Y``Vu+^^ubDfnrKr&A*OO}HfeWPy|a z+@Rny75rKS|EwaXMZuRU^sNdWQSdee-=oNBSMc{W_$LvUr{uKrN)5hV;8foZT(Z6! 
z75dLB_(_P@QzH2)ezm|!j)hCgX;5(0{%!?V{lU`UcPaSoihTNeeUzN_-L1jniX2t% zL4{uJ#}g)U`BblExMaPS37qt*{dJQWu^fxNFI!=F}!OuJi%t)kHrC*}Ki}5`N zC88g~CG89%9F`gb+-A86>a#VZ?5dnO8;`bD*8x`zII4Skb_zEwjX z)6lQg(1&0jC9<;ymu$~A0+F2fsbarD^(B{-^cxhtgrAFxlI6e`bJ+Nz1E;xz(w8;( z*Bp43u;bqy_%eZi*MVOw@EEO{R01`1DE@`-#hT#g8q*VT)vMI zci>fm{!It|TYa8vGO*h%WLk(BSkZ z_1x$eYVhkd_&N>Vufe~p!GEa1_h|4JHTVGyo~t?6_%-;28hoAxU#`LFeKNOp)}6EN z(a;N+jq4>4D981ATzr~e$@i(KIcV!D>95q#*K6=54c@Q8zpBA?_Mfkzmv3e`?WeVg zl5-p_=WrS>*Apkb&i?x~a{gU|tMyXPJ3bA)tS{}i)pLaGFS1iTNBrNGU!k#cq6U}i z71>F?A^l3O^UiiI)5xjT;BsH!l&|x{PK_M7o;&5pb<%0)T|$ntU%sWtSNmm>n1_kJ z4VP>O`spYolCPfc$bdiGr`t$dSL7B7acj-=?9zTZ4a9!D*~X`@bh}x&;*cM+&{F_c?((+igUV zLwidp|80#NdT*N&)eHZ0MS}iM3SOh&aegbjx=5fr6{{?AG9~Xz*K=c&2(ze?Y<2bNX`%uF}(c%#^4a zD*gFNyivthD7YGb9uzorvwD92qC&5pm+#Tg->;$nje@K5Rb0W<_Iy*p)phM%1y|?8 z>vE_S(&wc$0;l#-`}aEvuIk;X!S`tJZ!@jD{clEI`pM4~T$TSj4Sq<2)8D73ME0w4 z@|5{d#dYVbPiyF})!=e|cIsWPq2HpxAJO2y(%|xUd``W(bJi!6c}Z=Dvo$#VVLnPu z`867x{-F^!`gRR|rv_K&bJfmoYv_NW!GEj4|Ej^4bMsk!)p(u!K91T!#Xq6+OE1c# zKYvam=Uxqdf=12^1y}X{iw1u~!PWJSem6zQX}_u9x1wCyPjr;1nDP=AVQhwdQNiUe zxM)54Ck3C1JSCFzC0tU@IRqk+>@Bz?P8cPs#+fXa`a$^Jxbg%nrWr;r_rNO0(dh3{ z2}Pdh@5ALL|0@c;EGPPZR&bd;njCpXBU)9?mnAdqYQ0qXD*a~_`T~(By^9oFm7~g6 z<;XKHv8i%Y+$Us`d=*#aOW!6^Reqh|rJE{Wo|On!<@^f)BB^q|AVJ(!ISUosC-P+H zIt4FM@TCe~qTq7OD#gsNRw;l_}1Fsj)-&Anfh6KH(;1fljZWmI+Ad&1zEXP0SPXSUQT#j*ijw$%b1c=0c_nWk{ zSK!}K=vPQ$+~v89J<|hZQ_m!83>%iEczM>s+AVaxNmeX$nsCvR_&h z{8SZw1t)u?ADk*CTq2|TN?B7B zoY^nzDOK>4d9t``ZELJyxlxSQg!#Qm<~pM-4aIo12S4jrfnS9f#ml?847>p@&|-Q; zFZCt5ly@rGySm3fPH(ZNlK&6AkU8$3=A>Gi`m&E(?NeTGr=KFF)i?3$qw&A|=A!7h zUZQi_)7{q6d_%Nltx>$H5pN#%G)6mO5qJpR`YP^hCx>uxfTO*hPaYO1#*czN0(YsS z#T@>3H1!V7kJGpQpS}K^>Tn-t6^`Zg-Vb?S`Tq|iKaO{hrKctwH!<~f#TxMbUWf20 z`fUdOkXmO021~-Z)89`_nH__ImklJ$sbIu-N`G@PjY$r@bot^XN8bJ)CO1Kxl;`#a zq`NS8N<~u2OsA=7>U`-=^kKh|b2TrGDN$GIKS#q{?OqpG%A?kb{R*u3Xg`s{FDhH< zH#0^t{yKlO7=Ih?BKT3`4WjDNrpQYA^L&_aJKNz~A4}STcuu~Hq;i4Dy-Y2hFUBRd z35mOozfSN^7I)&GbqxHndaC}}$H3n&^vb;_>A(IM_&2-oQ+#Q>^}o-BpU%VM<(K<# 
z)&32~!2gKQFZZ)le>%^OSN{`&U)4|N#_{q$EBICY_Z$QN9vA&|-W;#~mtFMJd3(J4 zue#``^XPc_54z~5bNYDs54-56bL@Eev&6n$?Z2yzfnUZqRsJQ%z(3hVf5S2G%fI8O z>R)*b{Igv2uW;e_0V7iN%#25)T<{-pOpbX}sHC%zr#>lHFv^-DXi_c?#WeotoF~Ua z0TCgchl^B+V;n&&UH%D~OT7!T65xf%kxU)Gliv9}2fT!xAkLG>vQ#eg2P7cw&iYe1 ztG|PP6MgxERKjlt)t}Cl&iY4zx#{l}`pG^uIrZ10&`tkiLcb}PNk8?KQ~wrVZu*CX ze)aDMk|?MCJAt|BH?AXU!>AV>L;9!Sa_XN4H04Rvzz|<2F@`o z7)gI2E~oxRjeehS(1!)TygBQCokqWJIp=s$Fp~ahxSaa?HTw4m{g(-Td2{Ng??c?` ze^}_R7mTF;99&NQKhx;X6G4!m;FmY2{#_dVOB*>yk6E+e<Z@H^+PZGvC^ohvEB*?->!zpMYy#Dvt5&F|EH za;_yrxTkSYqkk)PHj?~%PW&l-{Elk$H_?~#NLx`(iS!ralJ%GRzlZsS+9Iv~MF5fN zWkd1DwE64X;CHKkP8(-11s~}z!R6Gy6wGe=^F+tVzaJ&Ur0K5(znlIVp?_E~l78|r zr~WDEAJXsizctiZNRH|!o+Ta-{Lb~iSMbxAQIpeuE(AZbUnN35ubl(x_^0!W)BZ~E zQ+yWPkdnSB^h{{Ebs{!K!^^b=~oxws^s#A`(TGlic?J==tSS~Jw-tbecI z_c-|X2!1EM)GuY;EbVvjZx!~F&ru@#FTo}GrTz5x?x-)F_KyhtVi^SmPfmMY)YyM` zoc8}pWB(o(`!9zHPWx%%ce6i_UIIaq=U@JmK7X7m_?_)vB=}`JNq(tcKKsD$HvZU; z+dyOrLTdkVT#{ePy+UJuxzO(w{PHIG<@5C#`)kH&e^g_?*}*la_Rj|-^-KHj)aY*# z`hC(+ahLoOzFnh#vy1*9m?Xc{zg?q$lh9u(6>0Q8snP$U&|fe3sQ;>PIrY=~OXPn} z{}~=<{a@GUFTIf~Hu;T>-;j&`vk`xC(?2rK`kxMdH~;Mw`Zp{3>3dyg{mV7_3uv)b z==oEUUp`-|(Vy4JDP%uW`&Z*~>TePHOQb+?_X+)Log; + //m_variables.Print(); if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; int threadcount=0; if(!m_variables.Get("Threads",threadcount)) threadcount=4; - m_util=new Utilities(m_data->context); + m_util=new DAQUtilities(m_data->context); ManagerSend=new zmq::socket_t(*m_data->context,ZMQ_PUSH); ManagerSend->bind("inproc://MyToolZMQMultiThreadSend"); @@ -62,7 +61,7 @@ bool MyToolZMQMultiThread::Initialise(std::string configfile, DataModel &data){ m_freethreads=threadcount; - + ExportConfiguration(); return true; } @@ -77,7 +76,7 @@ bool MyToolZMQMultiThread::Execute(){ zmq::message_t message; ManagerReceive->recv(&message); std::istringstream iss(static_cast(message.data())); - std::cout<<"reply = "<KillThread(args.at(i)); delete args.at(i)->ThreadSend; diff --git a/UserTools/template/MyToolZMQMultiThread.h 
b/UserTools/template/MyToolZMQMultiThread.h index 2b26844..de30253 100644 --- a/UserTools/template/MyToolZMQMultiThread.h +++ b/UserTools/template/MyToolZMQMultiThread.h @@ -5,6 +5,7 @@ #include #include "Tool.h" +#include "DataModel.h" /** * \struct ZMQMyToolMultiThread_args @@ -14,11 +15,10 @@ d and so will be thread safe * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ -* Contact: b.richards@qmul.ac.uk */ -struct MyToolZMQMultiThread_args:Thread_args{ +struct MyToolZMQMultiThread_args:DAQThread_args{ MyToolZMQMultiThread_args(); ~MyToolZMQMultiThread_args(); @@ -38,7 +38,6 @@ struct MyToolZMQMultiThread_args:Thread_args{ * * $Author: B.Richards $ * $Date: 2019/05/28 10:44:00 $ - * Contact: b.richards@qmul.ac.uk */ class MyToolZMQMultiThread: public Tool { @@ -54,7 +53,7 @@ class MyToolZMQMultiThread: public Tool { private: static void Thread(Thread_args* arg); ///< Function to be run by the thread in a loop. Make sure not to block in it - Utilities* m_util; ///< Pointer to utilities class to help with threading + DAQUtilities* m_util; ///< Pointer to utilities class to help with threading std::vector args; ///< Vector of thread args (also holds pointers to the threads) zmq::pollitem_t items[2]; ///< This is used to both inform the poll and store its output. 
Allows for multitasking sockets diff --git a/UserTools/template/MyToolZMQMultiThread.o b/UserTools/template/MyToolZMQMultiThread.o new file mode 100644 index 0000000000000000000000000000000000000000..f605040994fa812018f736f38d5dcfe6d5e415b4 GIT binary patch literal 69840 zcmd^o3w#vS_5UQBg(#Q}h!X2-!L241W5PodUnP)`S=neLpj5>qgoQ->8|DBAA4- zE>WsfZM7CFS}j(wT187S$kS?-+E!7kqP4;{D%1yJtLFbb_py6+b_NpL-@m{A=YKz$ z%*^+md(OG%p8K3Tvx`asQ&Lk>T!soM#yLiczG3I`J@K1~;&P&~#OM;V*|?`PIXF(n z@f{pva1eG1j$9n4;>g26SU!%iIL4{_({V1qQHbLVg`J7>SvbzdaSo2};+Tko(ih>F zgkv&}VjP5(;FzMWOK~>U^|?4tRoCa^T&}J!zo#)|PtFG(R`Es1EP}lY9+<0UC+mPfx2FZ^VRD58g;%7=j(CkFym=5UxgX|m)Y?_x*2g(gc)fnpBuiY z!0hPBG{b|;V6kljSl)zcO*!+XSaEYUUT=GMt%V8iU#m?aZW-o2It z%neWWnuTwNGR=-n>6grk5|QV46rQ}_Xm%e)(9`dN*6f}@*Jd}By&*)*&Gj7LNq5E! zGyIj58r~z>%#BBSjz_A}EOTpVCefPV()9eEx#7|RrQ(*-EC>{N+PHkKlrM8!G<}BI z-6V7tw&-4zjnq5LaMMn369TuC?v=`z-5u@VKG(CS2l*vqL(=4CEki^KF`i)^cZf)r zxwYc~qC>c3R?(#!{ozf*1ap@cUGy}$%6jD6D7vs{1{9Hnv9;D*{6SW#GC)_4XDOxL zH~FfRwxi9(n<$1@70oKTtf+D$G9&F1=&twW1yso(RTrf)Ba>(5zYNrj0dL-zU^>c6rSUW02He5!=1k6xJQo6j{P3b_fJSe zWXJv#PuFW`R6I}2K#@EyNX&4^m$`I>ubq-drb__}0*FmV?zg0+LY>crCc?ClYkisd z`;=8ZIi0?}prW&ZteaaqeLKl!#7mTDr{F@x-Rawnn3z}A!mG_pGg7kK3_qk43HY+i z@Uk>goH$V_Q7~QC?Utrc^#E}X$S6U#hF!1K*Bw3JC5@nEB3X})#`QLR-O)3VNGZFF zbbCA+wXM^)68BUPJLnwVO2y#iS_L9=d<)F(WxoA_V5m*%!&PJx>@*JEfFJnlBJu<=YER z`OoFQJZVds*9dg4lDbA77owH4bkGFCyM)Gp@cX(gXMw3T`GIJfFtWbI1KS=Hslki1e(n=)Ct|eK3z~ivLAABsayX{+D&V=jl3>99h?HS))(YMGrL2+ax=hWar{$|3Y_XJWr-X?G@hAJvybc<@5ju@S-#$-W_Y7apw8k(A)6gy zv^|S1%71x1DriDyv3TnmlDqhWbi@eF;`Qf6#Gq17N3*=IV?AzCLm81IIHDG9V(KIz}QaDem)5JNwgSxI! 
z&53GU*S^qTGcw3@HN%BZ0aLisb+Sr6OqBqykgpwQ>NaYrMwwyi`dTsAFgjN6L{y6M{HLiubhP#=W$pJoo$HXasQp?Wnfpx?5zHO(CE1%b z>s_~kidv;A>Qg+OL(o18ouetZlI^i3_oIB*3r!*u^UO2J10d$qulxEy(YM{Res6R% zZes0i0PXEW-N(l*)UgBUDAS+wZu9%-y8+RVvq~C`hMc0gPxA@8({OWWLW{}9neE^i z8)v>jc@pEyLvTgoOkJQ$(J7db5rSbY4J)Z(7VW(pOv&~|_0Jg1?3){2=Gy^x5yQ@o zo=h5K(%>@QPe?NWrZFgnlNc+MXVS=VJ9!y`87ac>a+$B4t}(X2kZ?tsxB<~pfE3Kg zLJ&>&c>^hlGbhkc8$f}Wt`t}D`T>2d#t-ed*Fy`CQoB!qlC71OG#;L4QJtcD;?ZK5 zxtx;0GTZ1ZEt4$^B~lO9hJV*06*(^jEfeM;-a0WQ5M4KcROV)QcOYda4KdL!jD_83 zd{!Wbm^)DIp}wHKnxE8MS+kVbJtP*a^2Lsh zoiHVu&vnv0Y-)&2P2diU*+^#c5Ib8BvB?Wn@fM>-V|w6KW4lgYJL0Gxq-mtQ7HtGo zw55ompxNxlwP3@V$SBI?@r^78>YBwUUKId! z(2N@~YKH*w`>N=dWDlC2kPdpb7X5|LSQdyF*IxlLG3d68_b=P8a5aFjJqfjJV-#pZ zAhUv*fX-vIkfsUfpEINH!03tHpPmpnoo%KPdVV2tCH}>i$cip1y3il~S}innd;qup z{vBkp!tbxd$x0!ks!?fVc{qIF9b`S3?>S_!}( zSt7WEe`n@AYi{_b%M8C_7XIC{X|n9d}oX1LcZ{LFL1S5zK_8$yHq;Uz0c zSmb^(1!_U)Vn6!HOj;W?%}5(&-?E}&Eb2A8OY+RftW2|TYv?#x+kpHYBLStOV4nb0P>wu6NeL*J9h`b^`QArof+M>x5ghX0;fP?A{`i7#Pt6!s~5zW$0p-v#YpEQ)KfO=_kPfW?3BtQHt>6j&b2OS*cKD;qd z*r&QHe|KYc=Nq2R4iIB1=A%S`$ST#mN_u2K48?@P(~}hT4ATn#6$TZG9$di$h2rXv zaeJUbY>I#GBkgX^2{gu~x`VJ$pX+kS5N zao-3Sf(C@k!3G6J#d}q-Wj0FcNL&V~5XrEZA{oF&;!=0v$wusaqby%m|(mbE4 zo`V~j*RAk9E@jY(f>uL`onG4#q?rS0q{O!lRQwR|BKB!Go+-dTY2WZ>;gHZGvSxux zq5km0l<8+sSdwggItiC*dtV}LK>{b8E=$K5m#4$idmC+4H+ z1Ljy|#}O+_GyRb#R)QOR5Y?qN?tr+%8bS%|OA!m=zH5fN+DZJGPdtE=r}H5=v>DDd zJGP|z!;%-RD+%N4N&c~*#F#ktFpJh1=7v}Js*pAixgygutW&H&nUM;t8&I(br&}i) zIf`5$gh*GRvZP|035-I#Tvy0)MVwflMVuecv&4dfur%hwUOWlRhUde(n3G^o9Z2cJ zgI+-f5-e|^wI;K)=8;11Ie1w21rtTzPLECCfYRT{vu!EeOm6nnicG7EQT`p(?E$nyL?V zP|b7(@0p};l>$X%cncbEiblMgzX>sXAy=y9+b1fMy= z=UGyRZYEx)vr;7Ds0 zIVY92*NjXMiE*9Ht})1s;RR}bi3uIx=``EhQxsmZQ^=C4A@kHFpdH7Wq1o}QSV7xH z2m-m*TktAhySzDL^y_#^Cx;cDoMXhu21=oJ$fv{5Dqe47RiOg6Ru}m|m};7H#gU$j z>S?Ju!ZxT5UaGkZP?MyOk`Gl;MUs_9R1i&ynsW)-e$;PI(=lQN63qy;&X?kmZTK$P zM0e9_0+<;vvmZCLbR%pIFD)nA%jGy!g1bbm_}^L?Fod4y@1BARp62rxZt`@}&JyVd 
zc6G-Ox5)OR26=T2dYT$B)i#SvZxn_kLX!w5n&Gzv5}O%eo57iBzEBoVsk!yuNb^$>g|0_J3RgKNmiZaS8dmbag6?DvA&s zGexr^W9eGdNJPRN^w=bx?7(9a8coo=rw6&!<8qq+Y$Hp^$ytrRrf^>=)Q z3F=#r2<~Y<%&bHq5jP^HlA^L+PUkR9{<3k8VWucT`@&PRy4W0$sCp=U;>-z~gGhn3 z$e7LZFX|6wSl9DLk{@>8B^o5mPW5~>I^!R)^*GVb3>I14AAKFGBC&k{A9}iuru3Ax zs23|)VjV43II2)$g~TVNbuL*j(sZ;7<%L+mRN~5^VDwx~8qTZ71Tp(~I&X#I!jjRj z$ihT10HF4kdQcLV|FURrIzQ z?V}n-NF&5FluQzGP7WIg{~ddH1QnI2&N)#>$4h=Iq%(KD9KuGVf2MdkFD5fXZMB^P zUQX#_-ir2W8}7HHjTD$*^<9imWWN#9uL@j*+~_lM@wyXnCK^mJX)&gIhLw4=q+k#`_YN?$8fLXIS^4ustb=@4b=qyaSeq){z-l zELoEB4k|JOd13TJs6kwBPNHQyT{sW;TxRr9q7El6V~fRUyRUky6@`pf<_@%T-p(4uYyYsM=^ug|7DaT^&u?VcMYO0&#y}IGbGtOvjs<|QuM@nLoMKW#u=4ml{HO`xNEBkc^lj6>%EOlA@5by^>uS3J1iR3gqm6eWlMGAykKQb zZLsEwGrX0Rjo$3}wRJVM-nv%rx36x?@t$)wl&fn!<($^KtC1T^wBlFS*xb@obH=Hs zmMxs#)Kq_Q*#%{7^`SaiB&?oOfxPyIZd$M=Sa(&BNc;Ly_>rFF&NfbiYbiV}*L@XT zrwxHE~?wf`huese{4l&+yyFVRb^trRbKnA%xhZ?^e?7nxX z(S0aUzI~|swxPz#A#}ZV2;tEogn;?^^T7Pk6!#BOjjbu}7gCIOQry2xH9k*qf0<(Z zF4eu#Nszc64ACJ>3ZjpZqR&obsjWr(EKh6bBy6lzqkA+`y*=7}Lyqyt3GN$njNki+@TG5GM3E=C!Tz3;hT;18lPLD{lPFO~HWJ;O zO^INL?@h#Ao%@-=#v*qoijRB}-uQ~kxX0!GgUfir<^G4uc)?X*7%S4;kE9vDNOSK_ zHQL>v`_k>kj$)YY(Lu(~2f3dbWc+%td(&WJ52{EyEL}8RydWXl%h(GNfyg}mf&>N< zp>O%|L^t6D3BR7S>Nc5nsckJOr}C5SbBUoE_YyohPiWZD@?vz0{r$4PJMhyt;SlQI z{~Paf#5Jbp(Xofo*(R2S@kkX@+%8NF#FPTlNi3FNjTj4Ioks~2Hv%lC9i%|lV5&g0 z>4C_CEIbbObecf?5xmbqyO91t!kQz)32UwYkt>(Ur7Q#we&6dy&1iO@ry+fziWM9h~-{CmH`q}#7=jLAf}$q z7m!>jsvfe#aGhI$`on*To`N$fi_SJ4HBv-m5jdBEab*JEMPOShRQ;CXC^=KskMM`z zj&8;<6@y#Nt*uvh7S{hJnvt1!eW|BBKK=?o|8z)-@jNDp=SW)mOOMX7aE)Ry7+!48 z^ei9ed2|xbBhBzJzUZfN6dqIb@2a}k^F+PkOdS->j4risjhY>sve`CT)t1rwQdmqA zuPWgMvB)!bkgH=ShW1=Et>o6%G|QIGWy+VP;h`|dT8ax%%~k);#r~%AtFx~CFRMnVms{*L7s?tEMHXrc8TrxR4v#X;1V<@I)_9r z=O6MNGTN(&%q%j^?#VObdg}hD`x^07#tgeIiA@w#9Gf4=^d)xbuT$wu)Dnz-C65$W zLGg=CbI5UMY7^^9rGLmrPhsr;M$d=r&>M2PNOfx|CKnCC*4Ana*D<@eK6Z^Mh7QD8 zR0+JH=q6^dR|_59XCWs|Hw(W~o~woFGadr&rpFoRgY?T|Z_;B`nh4X9EBQSIl~3C6 zjCw&+WqJrFE2r=ptfcBUpY&d9ok5y>6J>wj_=(O#Yaf3eDrRV#m8c`6GU~C6Y81m8 
zd#4VJo_a0h;!VLdI+d0W@WeSRm**)`yy^6si0;KwS1hh~^8f@TkfOwG@}SGX#vf>+ z{wSJykl<2kN=qOKTy>189_Q-^R?;#yTnCUE_UIkL@~*p$=o`>tdAxL~zKYGGVYrTJ zG{hpg+Byn1T1v%+psOYZbWMTHc zB6#(_l#tN_8y`mO#iB*b%vx4Vyl(b&FARs?LAgoT=cBrWYt{2@wVEW$?Nw{J9bBOs zm_pGOYmGQ)`sg1muCvjq!vCb}*f<`w0%D~tVS#^Vk{Rt6{h8%8lO9>nPgx4uuBC_- zD5}&9F&?bL(i?xYqmr>_tTbT3kRNTw zo7&ggb)y+QoZ1|i#LfWJ_)5R|5QXMMrR?D8+y){^z8+ULrXPTX5=am1A<>5+4VKkZ zx+&8&^WRehjJ1zP$G}$dVOoDJu9}vZx%G2avgJ~3T`C`J{h-|*qPdbKD0UbTS{2`B zG=M^nE=aDb2XaYUgGXib<%#cr~hnG2f%?9lzRd%)60PVG)R zN$v9S*P?hJMkzgU0{A!FGKLi{Q#;c!U^Gp*yA5z_ywx8>8nTj_95eRJb+^x0#;O z4QBT-o5^s!Tlr%TLPSVdpD3X92`}x4!O8+w4F0SZ9?-qhA_Z0;#CpUcy&j=qENNhw zB1gfQv1N)Kuzgu%o?Mx@6*n}4kc$@gKzbn3AeSk|3p%w-@e*RnB4U|>cGF)8;ZTYe z9I}PwXfXo2EEZx?T9l{ODLkEA}j<<^NEr`_*a*9Q`o77mg;J$h4ea`>pVT($Jr zicUzZPGm(3MDiHiMw9*mRyR?2IwaHwG%52jOQ5l8c)U+6qWuMgX5_d4cG>riKz+kT zjZE=z6I21bG7;WV7P%hY+2}1R{HiScWtnHn-)J5`Ts{id-4G^k$7B0CwKVO4oc*Y( z*BvezwM8fqypXZ|`k#08EVviUv#>V{)c2Q#_fG8G*EWZkohg`MGZ}V{r^$vOfaMk_ zFe?A$uAYF${5at8KZ{Iad2cT!(9dGVsXnAqmb1sdn1n1^GzS-?z_m5S;r+dv)p}U> zeC+W;O%D_{XNB@(ZjAkXPysbN+lLE^v;%?2Y_#Hy7&m!q~q-VE=+9tfD2`4Rj<^gc@B^*8lC zx$sU9n&H>Y!dY1%^f+(FMPbuLq!#AHi}7%q!oT@FQ~rz&C3F});}(?^njBM>^Edxu z_;24L&&?l0f82;%3@HxphAv^+f|K9l@3>jRbs$hr{xClVa{^bv%9;9ramnS@>^gnlW64lWMw?CM$A zK>3E0{2rEhwZEZomN&FBDWA>AXEXA-xvcPgh4tk5t{ZEUKZZs79fvj+K7= zHQYTeO)Yp$MAprYYqAW#r{ql>ndn;qfFg^~SM`Oal||-*_;t~27QUtPGF0!Mp_=W_ zd5i4b?l0VO-I3;6G9!*Y++M%7IJ_5mdNdIJsw}c76CB$fHVeo2Je~Jq`iS?eyk?{! 
z+brxgQ97O}e-A5gfq>SJVKI{ULGAb2){xqvDS=2E zina|rKS5PuJYy;!9mx5_T#OAcJk(~4%AhCq1|jFz@v{XFHBEXT5iXgCXI3WC8(Z+`N-2QlIdV~t*biwIj_|2( z3e|(>GpPJ9@=$zB;p6xRTZ%ER>6nS+gG-`r}VTK9C)uuWqem@w| z)R7d&Ryrs?IZa$aaXirziiDv%!sXZQf;+0qx*5OqnzFj zBlc**1L(r=(HL{0T7IG7>~r)Q|3sRXit#MH^f6TTCErFeWDz=|ZIRoCQEMpB)?Yam zLP8I=?q?ywk0uvYNp5798_K^SvD>D!lHSCnA{rAD8wr%dwS@VQ<@ixkvL5uBc+0O0lmj8 z1j}4`a+XQH9X;3}DtD_<4iZSzUaO9tS|Zs?YN$+ddz;Z=+H#!v0)hN!)y()ut1d^`(-#G5Gc9Y2cQcd&q5ON0LO3HLAl~AU(lKbwJLZ^5QXXt3B=x2(FUfxo z8&IheL01^>k2dP@4)q~jYedP6@I_~#E0X;pJ&qNx0?V4*4Fg8UpocZ_u0SnSXEF=h z|0t|PJ2h#E0=boi9rDe@*iYypK43vxaiCu>Jxs8p85;Sy70-l(LUk|AH zt{GA0j+A(#O^gpsjbcwH(8tFX<$nbtJXXrayU^K*Jr#u9NSRjq#f!Au@XOBOn68?`uQ_rWW$ zdJjQ4i|o9`4Z7c6w4NJ#Z$qx5qmX;LS&v0OGL)+CYJegVM8!MhNHx6oSTs6is#lMu zAZY@7ybdPnD65Ltjkj!sa6*>D=ut?CY>0r*Se`-`@D#}`a<;tvXqNi4UF=bxpg%8v z9PL+|(o)?JoZr-PMSi~AvQ}B$GOx8nkR<%G(vPRqc68(LxbE~|OG{HrWymI8Gqv)| zA75N_!GTrKNyT~W1iKA)Li+?dr<<{PcIlm{|Nq60zGPfs1J8cxkzJ z&mEte?;V>rc3l40`~q+GwBQ`CSshXlr;Nkn*%2yj%GJ}1lm(e7M-LsGz8n=X6ZkPI z4t=`_gypGg`c^On>Wr~$B*JO*-^|Edl3JIMj!UAi!a?D1c@p$;Mbt37gbDfzL9gYd zTwNXz+6=yvD2~=TQM`{3gp+V!tpBEE?9nUay!;VRAW zrp`SyBMV4TM*6u!ieuMEO!*j!d{e+H`54Jkx`5DY3~uit3}F=x!FN(d<}%{DMDV== zyr9XXjP#NrLXQ^&^}zKok#2?3V+|P|a&AtYl#z9lYf^@{J8e=%_A+--M&6P^MHvMh zgNrk&T(`S23V;-4WP<{fNC7>N6#FZUor>3HTpNDUxrlTw$?$fhrLK||0QI>;03*vH zVKzuTm9VqW8Rd6zOkpZ-@`v$?p8QeJqZG=NKah-_DqinIApfmZ7`+uI(!}lJ z7j~&k5F_N=rP3U!(-7ej*D`_wIM%87Y|QMm-Z#U49LnvyjO}U2j7_GIU4ZEaROB=zsM`j!q!y$BJdN#*LNawT86RLS3V2=^7c{FIOmr5WvRSIHom zBTLH?5UFFullEDfxn(y-PE;yIsv5+YH3~FAepFLN*u*o=C*~p6dMLiiY|C z*-l?9+7!BV3fMjle*cJ#$2grU)~j&TFkHroiGaFuiQXqU%4K9HgFsy33;fAZF5^4N zpr98+#iTfw_*#Dw4F0+|PKomCnZI=4AHaO;HM{H1h~X61|&zh z3~JRA168e5hKoK=k+{UK3?$OiolE@cfE`|x3<~9Po>h@C%flOGRat^;#3TjPL2o*m<3xkK>|_BlbOE2YnZR-+~glj7wta z`Gw1v#cyNRF5|M;HNS8fRs1$~?J}xk*Zjg|)bQKbwab_jyXF@zV=ljqUAv5Vv1@+e zGV1tk?Am2q9=qljE~B2`#;#pP!VH1o^eYz0!DU>T3<7Z(tqypb13upYU+92eqhhHgr>W6qp74>g|;pHH%2Mc_BQV%m! 
z;K}XDFhy@|DMu>Yn&Mc6)3}z0(F%{(7#Vw#z$b_?0jEDj(NEo?)n>N8EUgS)UaG__g3oi5wcEN=j zU>96+q?HuqGLrL4L@pyazYvK@%XWjNV^MzhJmWeiOQfw+vr9PkVW z9N!AG2`;gcksRSNh9!eQTyg~=Db5vJ1xbu@#a3ag_z{VKx^u-w35n4zV`L%_qo3O~bpBjOm8q1K@f4;zV?&Z205#0=YRBF8c)ir!Ia67qc9dPLK9HR9<5x5=yJO}(7;CB406~0Ev6KjOH?gT#CI9+QY)(7ck zwZIEwxYqM!2mA|xpB8w|S?X#Fa67xDqKUV|Pj$d20v|1Y8BQ|R{I3)E^` zd_3$`@qclk?^C$$;%Tjt!m;Qw?ecX7aN)PgFq(d*!rK*2>ys3IX8)nac zJn+#*d{!ye-J#DX4)iCWLmzF#XPdN6PT^YMcKWY(z>h-y?dWGZ;I}*AKXbqzcfg-@ zz*Dg{Z>P_2;8b4aYQd4#@F^^CpnuT;&&CwRj{g-7_(}(SwFCaR1AYqhwv$`vfZybR z?{>gxDrLt%i*TFi^|u}9#{wV4)*yTR30tY^K3;HTiM)VK2B0MGVYIPUa z`A-6`iQ%Z8BJ2XLWyKDa>=jT;c1ncohs{GcF zzoLMk@%4Dq-7li61yoXiuUZ9Ls#}94gPC(ncCO9SUmDhgCqwzkc#^jF$#{K>7sx5O$ht_TGRDl6-n z>Y#y<8so1Zmi%k zR`Qp;#*qOnwoTZYRXm~ODx%@rtTbj#XW3Gv$?SbZ$1=a3!2No5+gxYY)`_`RlbNQ4 zu}?N=%BdBh2{oaG%|Xqdjg9~bc&7r!c#$rj9K%__;YGMSRtB9?f`X*6goikWG%(E7AOJMhIc{* zzOorSNYVo`?&p9+*tNN-zFu6Au|(fZZ6MvW1Z$+}>1Gaw1;{L%xUjM^RNK-tAEiAH zpAQvHW%<+^1I=cAu#pUcv0hzMYbAQXV9bVS#bu9(7`whu)RF0Y$fD`x_erTyu5PeP zsRoEruukM4WISb13$ z(KKOheOqg-9E9<)B<4qDaxf_x0!c7%x531$42=gSH8sJKh6c0^4VJwE~=aUh*`e-cYg=*vNr))U&4E5ihBp+llnlyo)lsth3bdU)&D3B|M z8dxx;jt174iP}vY3TK4sFzT)gw&v$et*ft}F6wB>wDR*RDidaHBms6CuZtZuv9f7y zrThZ*+=dYR>uOTszqG;t#mXva@FS}oW(svzwgoGjn_5x*P8Tjv(;(-^`LQLC$~i$a ztPR1&5G_l@VOo|D{pNo&L)3<_4Ar88;kiOt#Me>87sKQ3Ps=bO*P=GY^Mx;uA2hp` z356U5jVld?{BZ=tSLx{#_de`^WOuCA|ctO?F`D1(Z;%4rp% z`LtCnKfgkJIi3DDf1||~6f!1;tbr}yiw3fLuE-9aKRdPdLg62bL0Nom~*kYrEY85KJ zVF4bv{Qk|gQ&N{reZ~{J_#7ZMB@(!*6?0mfDr?ag)nkYtsvt(x3$T2OhM~ghyGflQ zM%xzmPA0*s9>z;pYOkJMcU3-yHkc1%NL5LzhiDAvHc{`IEGxFwk}M-0Rbd_MQ)|Y> z(NL|m&Q_wIuj<*Vko;1sm|s|PY+1yT**U-{k7W4|5?^7zgd~&N&g50;HMg|{E3c|+ z3AI(zQxiPS6yaq2{~-?g_zi_|5*9lo50NOZwk1{47OJ z`mbX+e&a&~!hg$f`dK>)WQX-QH2=Ra{27K1$G$=e#HR;`rl&9WQy@MYaA>^T0l$Xf zs~G(QjQ_I?e}vKB%kYay5d=DI#G&N|8Q#P2s~FDxww>Xd82zmb=jHo4!#6W}`fUmd z6wS+#emjE#nU(5@*7I0KzlGts4ById+E++{_&BojGp`3*NlD}qaQQ~Hwf1J9-(luC+*tPb{NI*$qd)~)rh{B z;ioYEygj*z;mGKC1+HNN^{AGquV)!2!UhaTj#&FI*#Bk1knZn5r|HSzJkkS8{;rA$< 
z>|iqdK}P=y!=Ge0*MBp^xt?z@oa^}+!?`}LLm&)+?D;AVZO>s0=W>r{IF~zt;au+d z4Cit$Q8?Mb&*auHdTxg+8P4_j0mHdIKW8}CX9L5zJ})qQ2h-rgO7AUkupK82GV&cUJWKZenBJIrGEYm7d~@YfmM%5bj#Vh8+(4Cnd!1;cs1HZYv? zf6)Oy3HvK4kR3MT(D}vZB19mZ_d5ZFlYOS((Dauw`YjBneWMhJ&sH3oeu=_Q2FB~@ zEsX!UjQ+=r54ZpC8P4thXNGh8zsYdk-+jUGB~0#6?BAq7`oDog>+e-K>3;*m{fwTE z(}E1=oAe@iW7Alqv0Bh`jA(ue!4SK*{TZ%@W3oXT-3(`P)R=l(p2;k^Fd$?(50J`XaS>;DYH zx&E&)oa@=kaIVi64Cnd`R-5uDzg+II4CiuBXE>KTS>a?K-ri1S^jx278P4l_C&PJt zzlGsk|CJ8-8isQ}c~aq2?%dB`bHG1fIQLuH7fgZd#`zCXIPqVFL;L?IMt>i}r!sn8 ze`^{2Hb&pf@P`?`Na1AvQiewu&d0sCFua)2-_H2+aqlXI^Y(MC!YRKuFg~9;&<`FC zA_Ob_Oofv@pTnW;b`8US$M6Rk&gq|HIH&)B;cqfNUod<%!-pM?8wApa<0mrwEyQU( zPjSGe2YiMDeyIaq&2TQamf>7(lLH=NIOlVl!pT2*KX@mj z=l$SWBS45i_J1FTw)52tzm4HHGMv}P?F{GiI~mUD%Z>n31d{t64lTEa;hdiKp;I7w zj(0lXKXAaGU^v(FS%!1|yBN;tKVdkhKjuiGrdVeP;8N%71cj6SyBK~lqo2g^(-_X% zld~CK%;+aE{=7Z;=|~VEklg(^v_6k9oa>*LMbwi13!F9mB!+YP3lvWNR>tV78T~&Q zejUR(pA`=Hdkp`O@!8LC&Zp)m5Ft>0IsJTwbNb~Be-~+Ve(z`azZm``!#Vvc4F8hR zf2we@&v^|0g3*7)@FCyA4Fc)U`Jc>i&fiox@joAj*1wF=bN+n{=i{Pdjz(ex>Mx$a zq4}JtaMI^%hM&)HZqJ(>@CO;r`{j|xfGGm;=l$J$9Gd@OUfdv9?U~~hu)x>jQqxahIJZv` z!?}Hap>UG>TO69tv7>N0$h*GkhDP=lO~<`WcM=D+l`7Cx8fn z%IhyUwB2?yoVR-?`iNT6bG(S*yxn_@;k@1Z!MEdl=$&yb_dbSm{!cpKFFN3xPmJ?V zVf^1af%x!t!B;?;+HeY1 z{A&0-1K*l2MYhG1^>0;v&w>>ru2Wqf~PCH zoH|4@)OP4{$@>e2CgM{R{a`kqrSjUW=#RJHpDMZcTJW&KS6lG43V+OkzoGEoSn!Ps z-)O<7EBtv2e!kLky9L+Z#oS@RYZU#P7W{aHziYwi`$81nXY&iPPqVUn3Y!-Yey76E zu;5Dv5f{UlVZm$CCEm@(vBam(E%Ey-_{oaT+ZLR@^F!ft3!bC+x3Kz7{5uu>)fW6Y zg)g$;=}MpPTkuSU-)zC@dp;Cyx8S!c{HGTDw+jEc1>d9ahb*|(=TQs(fueuPf^SrI z+hD;DQRVfV1=n(4wBWy1^siX(Unsl1Wx@ZT^m*TczpVItWWiHZdF`{{A1eB=P-p^^E-~w^Zb67;hfJD#)s#Ze1L*AU&NCF;XGeI5vas@zJ8`~YrdSuMH?7B z*OSJ;6o?PUcL_8Z|H1*MUxA`v<&*1xpXPv1cfgw*@Wl>z*a2VVfQw&Ov(f(#4)lDV zA-CJ74)nvRU~TxH?0}DTz~%Q=lkI%D1O2rQc&7vYBM1B`2YkB&zQY0EUX2!|o?KqK-=TKqr;NVDf&L?8a=@Q-!2eVEH68e0qHt?|uXLdAaKNc=rC{}|RT56ZY3+`JmHs&i zC*j-<_ku>3i`RkvB!yGClMb4`fYIN_@c(K4MNBSl$GF@FnLdXpdeWczGOfRwjtKj# 
zX85U!p6toTIj1w6k2l;5=g;5OV>i)1(pW;v9i`~WPbM=wo8iR_KSklB&+QC9l;PYC zdi+E5Tu(J^68XKK@$oA@WS{j6pQUi(vyI`64CivMV)!GB{(6RUee`^t_;WsYD0U~C(D*9^A`t!p4voJ>AOi9EJr0e3 zNFV~?T<(5>Cd=(pxK-{T)&E-M>h%w++~Zh({UUg3efYSA@~H8%89lf2xeVv@7c!jZ ztD525p0x_M=1Y%%tohRGN!EPvaS@lh%t7w04sw6Ya9)l-V|;iy{*vL`4tm{)^yGfK z&4K<^hI9SjWjME6RN+>;?PWOEU;aKovLCvrAweKLx&C^6&Z_?rjGpW1WjNRKB!ye` z%w;&&Q?IL8%XbRn&-FZy;g28>+HWsqIB&0(Gr7FIx}VYW_UZ|SbN_#y;k+LGk>Ok) zz20WEXD_4Y{6BWUzh*eM{~@XaBmUg}nF_bses)6)<2QXu_T;n3yV&iM28{APx8`}~;U+&=d({>vDD{eBS1<@V9* znpXStF#g;=FEO0!^E$)1KJPf_vzy^uAH6TJa5xKd0C0zE=KkIMDB6IJeKo4CnUwLg7~X=%386`ng`;rDWVb{5`V! z5vS|pEovZ3^t}Dl>(GSr{H|s6pD{jqz1B*HG}cta)S)#{@lRu zCE%y|w=taCt)1bVkAALVweu~Ep6B-`4Cnb>rEsgAS2LX3`7y?a=XX8Bxt=M5h1o>8 zbN|%OAFTQx$>{HA_PLtjJYV{`B=O<-TBhi&`MQO>^LlzxIsgPK{Wykm`<%mYZl4B*bNU4g=k%MTSRwZvTx-Am z1H-xge`omb82zDaT>UP?&rvuHvo2)#WJdp2hVyyldkiO^q+qqrs}fGa_c`ESJK*$d z^c1Z8oz|IkzO3|@I`HB1EN;(54)pprfUW!=ccA~H1OA2s?zGN)I2#XhJ&$(4^BnMU z2VDPNp*3Gk4)ixW;I}*A4?Ey5I^eH3;2%2Rv!$B|KZ)X6*O$i}@WYw?sqSd{?>W%x z=TKxH&R?%f5YGAJF#eqWZU_7$hJV2Hk-s}9{h#wOnH_dB`g&Ybu z`N+4mJ=ZY&Ifnn4;lE?Ji;WXF{fP|c^m7@`={p(D>3_lSB}}f~-$D9w{4GY$>!be8 z09B6?#^)$De)tW;y$ZL|f7^k+*n!^fKtEsMR4%+6BaEJx<9m#rm*aj$zm4fX{1DtA zSnYFy1O6R`^YMHh!#@E(?I#5c=k`CF;h!@4YKHUiVI9M{+(w3T{#^{epYgw$;WS3l zdfv`(&Y$c;fy$SzHT_Tm5eQ2~Tdi@u&xFd0zSX1gi3|BC1Ww!mb zZNk&>FH;e!;uZ9|uhcx~d?5NmDAE!L8tTA*2&32X38LR>wg!Tn_~>T}l!o&eJU~8r z>}uuX86cnGjGpVG$M(dZ+nMXh>Az2eDyY}3g#DLl5NFPZ^XGg89nE-o#9?}O{)Bj;n@sd$?!ae-@|ZyNqlbAH#mFMHMIk7bxkL|AB^xp3^&y^ z;i}6L$Q*|2yc4v5;i^d!2YDIF8LppW5VmKg z4dSfprlwz`;>M6+5D52(EBS8%!-fAzeQ=yjP=ilBvF+u{y}47vWemPSvk@F zp5a8VdA-MQy3{_s--3&V2=OT{{6nB<;-mLL>U}|kAB{tY2^M;viU=^A(zGl4&R{t4 zAEW5+VmR^Fw!4qvM88*pZEn-0A`^_+!X`z8HOECsEazH1r%NrHp=m)p9qAyhrv$i+8w z<+)a0wFK*{bLqpUMy~itgWTDzt%d+{<>yyze_01V7^9LAmGv8x2|s{CzdbPka{85x zL~{P?7vkIf2Z&BiZTpKix$|o9Lk;3bbShzw`k=@w7o>k1C%2`E%tD6C&&7``@mu@` zla-G}b*o`4ALkUm!IGGpgQZBe=6~rk-~Ij}vBdn@e@WD)SRDwH3z&T%IQ|&G;niGw>gJu{aHPL-BJV+UZc!l7H7F?wPzsCev{t_#ub7D}wb4W9}?I 
z)carhO#u50{P%vMAkkm{leO#r+Ak3N&t+i%zchHT-vRjFq2zz!+y03;v9#d-q0ja+ z8Lgped{DVrRcQJFpUUc%mTF#`lRsc>`4YBdCweyYI+w(tg3dY&~GujSJ2S9wVpE0zR zkd!D^V${i4ipBa9h5h4bXJ|X-2R5K>3J2?8U<)t*H-3pt`K#4A@uNu9^|=T75k>rR zXaoJWiILmb6bj~+o;TwZRAT(Ln>1ChwsLMuHGVoVcV0^qD)zq}9>5|1pGRwk6V-*D zcX#0a1{`Wy5j#7j*XtX4&Q0kr8bJC>K}&(BpNaea%GajRYYHU)rU9gH2Q39IpPp^@ zSN;l-IzIm<`4GgUFw;$=$-~7|sI0c@bp6m8Ey?$?yr~hyO>3vFmIw^rb3=wY^e@;e$6|`3Q zp8&IypP4JACOjJeWR?FBFgy9}HuA}rtn#T(A^E8m|0z)Nharv!lEuTBD*adv;@Yp$ z)0mqFl0`wE&vi)ufJ*>NO$twRR4)WJNB~$EB2}yn- z4y*h>ImmAxFB9s0g^Ep*|GnfOfBDlgMU}EY$v+c^RsIbqIJ^Ak70C3PAe)q{3!|Fdo6uXd0xj0O7N$0W-Ot9*JN(N6wO+9ZZRdxaTU#r|96zvLi)_cJm{l}btSi*Q)wAA|C-vw!9pGGU2IuTNI_^nRjU{>>hlVu4CX z@+af4%AeyPze>s1^omWA|5ZB3@9B{#bUh>alo!oU~Q+0mXg^_Yx&=&^|z$2-7XU>SLsOq^Kn@9AM3W4?^PAg?NSWX z$twR8q!;$H$ltHzuT(h6zraR5?SHV#f1W7?Rvje&dmZH8@kg0nu@UWGg^m0l53-lP z+%MD5v*gQ~{~sc~UH*3}`8|@2kUzsl{v2%nvdh14HqdY5o4s&vm0yYUcJf!!z!G6M z>_dUmq%6Wo@)Y}TmA_HTk5R~rrLW5~ufMc@YL!pl#iS}`aWiAT zlCS9{w|IJuzv3WY{MZr_>3T}}zXXR)r}14#PxiNtKgCO3z>mklgEjvn!I0AH?;~hh z?}hTpO0VT>y5Ruq$}j5;Ns*@rN&m}m==56uQYGK2f1Z**Ql-}?t3FLCy;Xm+pZZ_! 
zpucIOf3=PNw>#)RQ_1&e#nl<)34wl3&(8j}D!sOoPGn90D*$%(U!e3iRYJ1=92`2m z=KCrDJO61{@<(gI>g=`LABy^Ir~d=})ZdNtcKY|&=syp#t@_h`eLMYol>B2X`da}${(d|CMPmp0gZck64*F+NV~L>4S>DC%r|+M3(7)V9 z{{|$o+W!Lw`FTpdR|`^SonFJc9OU0&BfklWbb6hyQP>=8=YQo&et{O`ApaPoxAVW9 zN`93}NA>4Q99H?~ImlnC=wCn~nU{4)VA6Q~v!9@^{!-Ms{6zK`G4ATsq9>cSQU`;<=rMJq@Q}VUHXeQSC^O4@p|9e#a zwV#pyU57)b*Lb^ -#include "ToolChain.h" -#include "DummyTool.h" +#include "ToolDAQChain.h" +#include "DataModel.h" +//#include "DummyTool.h" + +using namespace ToolFramework; int main(int argc, char* argv[]){ @@ -8,7 +11,9 @@ int main(int argc, char* argv[]){ if (argc==1)conffile="configfiles/Dummy/ToolChainConfig"; else conffile=argv[1]; - ToolChain tools(conffile, argc, argv); + DataModel* data_model = new DataModel(); + ToolDAQChain tools(conffile, data_model, argc, argv); + //DummyTool dummytool; From 854303b422a10b63563d0de884e557fcfe41b38e Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Sat, 20 Dec 2025 10:18:13 +0000 Subject: [PATCH 03/12] bunch of updates, n.b. 
we should rebase and remove object files from git history --- DataModel/DataModel.h | 88 +---- DataModel/ManagedSocket.h | 6 +- DataModel/MonitoringVariables.h | 11 + DataModel/QueryBatch.h | 46 +-- DataModel/Utilities.cpp | 260 --------------- DataModel/Utilities.h | 152 --------- DataModel/ZmqQuery.h | 36 ++- DataModel/query_topics.h | 9 + DataModel/query_typedefs.h | 9 - Makefile | 19 +- Setup.sh | 2 +- .../DatabaseWorkerMonitoring.h | 66 ++++ UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 297 ++++++++++------- UserTools/DatabaseWorkers/DatabaseWorkers.h | 41 ++- UserTools/DummyTool/DummyTool.o | Bin 52112 -> 0 bytes UserTools/Factory/Factory.cpp | 6 +- UserTools/JobManager/JobManager.cpp | 3 +- UserTools/Monitoring/Monitoring.cpp | 80 +++-- UserTools/Monitoring/Monitoring.h | 14 +- UserTools/Monitoring/MonitoringMonitoring.h | 23 ++ .../MulticastReceiveMonitoring.h | 36 +++ .../MulticastReceiverSender.cpp | 174 +++++----- .../MulticastReceiverSender.h | 20 +- .../MulticastWorkerMonitoring.h | 28 ++ .../MulticastWorkers/MulticastWorkers.cpp | 88 ++--- UserTools/MulticastWorkers/MulticastWorkers.h | 22 +- .../ReadQueryReceiverReplySender.cpp | 305 ++++++++++-------- .../ReadQueryReceiverReplySender.h | 19 +- .../ReadReceiveMonitoring.h | 38 +++ .../ResultWorkers/ResultWorkerMonitoring.h | 33 ++ UserTools/ResultWorkers/ResultWorkers.cpp | 212 ++++++++---- UserTools/ResultWorkers/ResultWorkers.h | 12 +- UserTools/SocketManager/SocketManager.cpp | 47 ++- UserTools/SocketManager/SocketManager.h | 7 +- .../SocketManager/SocketManagerMonitoring.h | 23 ++ UserTools/Unity.h | 8 +- .../WriteQueryReceiver/WriteQueryReceiver.cpp | 211 +++++++----- .../WriteQueryReceiver/WriteQueryReceiver.h | 4 + .../WriteReceiveMonitoring.h | 32 ++ .../WriteWorkers/WriteWorkerMonitoring.h | 28 ++ UserTools/WriteWorkers/WriteWorkers.cpp | 99 +++--- UserTools/WriteWorkers/WriteWorkers.h | 15 +- UserTools/template/MyTool.o | Bin 19720 -> 0 bytes UserTools/template/MyToolDynamicMultiThread.o 
| Bin 46912 -> 0 bytes UserTools/template/MyToolMultiThread.o | Bin 50032 -> 0 bytes UserTools/template/MyToolServiceAdd.o | Bin 39104 -> 0 bytes UserTools/template/MyToolThread.o | Bin 37352 -> 0 bytes UserTools/template/MyToolZMQMultiThread.o | Bin 69840 -> 0 bytes 48 files changed, 1443 insertions(+), 1186 deletions(-) create mode 100644 DataModel/MonitoringVariables.h delete mode 100644 DataModel/Utilities.cpp delete mode 100644 DataModel/Utilities.h create mode 100644 DataModel/query_topics.h delete mode 100644 DataModel/query_typedefs.h create mode 100644 UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h delete mode 100644 UserTools/DummyTool/DummyTool.o create mode 100644 UserTools/Monitoring/MonitoringMonitoring.h create mode 100644 UserTools/MulticastReceiverSender/MulticastReceiveMonitoring.h create mode 100644 UserTools/MulticastWorkers/MulticastWorkerMonitoring.h create mode 100644 UserTools/ReadQueryReceiverReplySender/ReadReceiveMonitoring.h create mode 100644 UserTools/ResultWorkers/ResultWorkerMonitoring.h create mode 100644 UserTools/SocketManager/SocketManagerMonitoring.h create mode 100644 UserTools/WriteQueryReceiver/WriteReceiveMonitoring.h create mode 100644 UserTools/WriteWorkers/WriteWorkerMonitoring.h delete mode 100644 UserTools/template/MyTool.o delete mode 100644 UserTools/template/MyToolDynamicMultiThread.o delete mode 100644 UserTools/template/MyToolMultiThread.o delete mode 100644 UserTools/template/MyToolServiceAdd.o delete mode 100644 UserTools/template/MyToolThread.o delete mode 100644 UserTools/template/MyToolZMQMultiThread.o diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index ef85719..9ca8a5d 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -8,6 +8,10 @@ #include "DAQDataModelBase.h" #include "Pool.h" #include "JobQueue.h" +#include "QueryBatch.h" +#include "ManagedSocket.h" +#include "query_topics.h" +class MonitoringVariables; /** * \class DataModel @@ -28,9 +32,9 @@ class DataModel : public 
DAQDataModelBase { public: DataModel(); ///< Simple constructor - private: + Utilities utils; ///< for thread management - DAQUtilities utils; ///< for thread management + bool change_config; ///< signaller for Tools to reload their configuration variables // Tools can add connections to this and the SocketManager // will periodically invoke UpdateConnections to connect clients @@ -44,6 +48,9 @@ class DataModel : public DAQDataModelBase { unsigned int worker_threads; unsigned int max_worker_threads; + std::map monitoring_variables; + std::mutex monitoring_variables_mtx; + /* ----------------------------------------- */ /* MulticastReceiveSender */ /* ----------------------------------------- */ @@ -54,7 +61,7 @@ class DataModel : public DAQDataModelBase { // and grabs a new vector from the pool // FIXME base pool size on available RAM and struct size / make configurable // Pool::Pool(bool in_manage=false, uint16_t period_ms=1000, size_t in_object_cap=1) - Pool> multicast_buffer_pool(true, 5000, 100); + Pool> multicast_buffer_pool{true, 5000, 100}; // batches of received messages, both logging and monitoring // FIXME make these pairs or structs, container+mtx @@ -64,51 +71,22 @@ class DataModel : public DAQDataModelBase { std::vector*> in_multicast_msg_queue; std::mutex in_multicast_msg_queue_mtx; - // Logging - // ------- - // Tracking - //{ TODO encapsulate in Tool monitoring struct? 
- std::atomic log_polls_failed; // error polling socket - std::atomic log_recv_fails; // error in recv_from - std::atomic logs_recvd; // messages successfully received - std::atomic log_in_buffer_transfers; // transfers of thread-local message vector to datamodel - std::atomic log_out_buffer_transfers; // transfers of thread-local message vector to datamodel - std::atomic log_thread_crashes; // restarts of logging thread (main thread found reader thread 'running' was false) - //} // outgoing logging messages std::vector out_log_msg_queue; std::mutex out_log_msg_queue_mtx; - // Monitoring - // ---------- - //{ - std::atomic mon_polls_failed; - std::atomic mon_recv_fails; - std::atomic mons_recvd; - std::atomic mon_in_buffer_transfers; // transfers of thread-local message vector to datamodel - std::atomic mon_out_buffer_transfers; // transfers of thread-local message vector to datamodel - std::atomic mon_thread_crashes; - //} // outgoing monitoring messages std::vector out_mon_msg_queue; std::mutex out_mon_msg_queue_mtx; // pool is shared between read and write query receivers - Pool querybatch_pool(true, 5000, 100); + Pool querybatch_pool{true, 5000, 100}; /* ----------------------------------------- */ /* PubReceiver */ /* ----------------------------------------- */ - // TODO Tool monitoring struct? std::vector write_msg_queue; std::mutex write_msg_queue_mtx; - std::atomic write_polls_failed; - std::atomic write_msgs_rcvd; - std::atomic write_rcv_fails; - std::atomic write_bad_msgs; - std::atomic write_buffer_transfers; - std::atomic pub_rcv_thread_crashes; - //} /* ----------------------------------------- */ /* ReadReply */ @@ -116,20 +94,9 @@ class DataModel : public DAQDataModelBase { // TODO Tool monitoring struct? 
std::vector read_msg_queue; std::mutex read_msg_queue_mtx; - std::vector query_replies; + std::deque query_replies; std::mutex query_replies_mtx; - std::atomic readrep_polls_failed; - std::atomic readrep_msgs_rcvd; - std::atomic readrep_rcv_fails; - std::atomic readrep_bad_msgs; - std::atomic readrep_reps_sent; - std::atomic readrep_rep_send_fails; - std::atomic readrep_in_buffer_transfers; - std::atomic readrep_out_buffer_transfers; - std::atomic read_rcv_thread_crashes; - - /* ----------------------------------------- */ /* MulticastWorkers */ /* ----------------------------------------- */ @@ -149,49 +116,20 @@ class DataModel : public DAQDataModelBase { std::vector plotlyplot_query_queue; std::mutex plotlyplot_query_queue_mtx; - std::atomic multicast_job_distributor_thread_crashes; - std::atomic multicast_worker_job_fails; - std::atomic multicast_worker_job_successes; - /* ----------------------------------------- */ /* WriteWorkers */ /* ----------------------------------------- */ - std::atomic write_job_distributor_thread_crashes; - std::vector write_query_queue; std::mutex write_query_queue_mtx; - std::atomic write_worker_job_fails; - std::atomic write_worker_job_successes; - /* ----------------------------------------- */ /* DatabaseWorkers */ /* ----------------------------------------- */ - std::atomic database_job_distributor_thread_crashes; std::vector read_replies; // output, awaiting for result conversion std::mutex read_replies_mtx; - std::atomic db_worker_job_successes; // FIXME add for others - std::atomic db_worker_job_fails; - - /* ----------------------------------------- */ - /* ResultWorkers */ - /* ----------------------------------------- */ - std::atomic result_job_distributor_thread_crashes; - - std::atomic result_worker_job_fails; - std::atomic result_worker_job_successes; - - /* ----------------------------------------- */ - /* Monitoring */ - /* ----------------------------------------- */ - std::atomic monitoring_thread_crashes; - - 
/* ----------------------------------------- */ - /* SocketManager */ - /* ----------------------------------------- */ - std::atomic socket_manager_thread_crashes; + private: }; diff --git a/DataModel/ManagedSocket.h b/DataModel/ManagedSocket.h index 359a77e..d838c44 100644 --- a/DataModel/ManagedSocket.h +++ b/DataModel/ManagedSocket.h @@ -1,13 +1,17 @@ #ifndef ManagedSocket_H #define ManagedSocket_H +#include +#include +#include + struct ManagedSocket { std::mutex socket_mtx; zmq::socket_t* socket=nullptr; std::string service_name; std::string port; std::string port_name; - std::map connections; + std::map connections; }; #endif diff --git a/DataModel/MonitoringVariables.h b/DataModel/MonitoringVariables.h new file mode 100644 index 0000000..14bc0d7 --- /dev/null +++ b/DataModel/MonitoringVariables.h @@ -0,0 +1,11 @@ +#ifndef MonitoringVariables_H +#define MonitoringVariables_H + +class MonitoringVariables { + public: + MonitoringVariables(){}; + virtual ~MonitoringVariables(){}; + virtual std::string toJSON()=0; +}; + +#endif diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h index 396211a..ffcce9a 100644 --- a/DataModel/QueryBatch.h +++ b/DataModel/QueryBatch.h @@ -1,11 +1,13 @@ #ifndef QUERY_BATCH_H #define QUERY_BATCH_H +#include + +#include "ZmqQuery.h" + struct QueryBatch { + // fill / read by receive/senders - QueryBatch(size_t prealloc_size){ - queries.reserve(prealloc_size); - } std::vector queries; // prepare for batch insertion by workers @@ -15,6 +17,22 @@ struct QueryBatch { std::string calibration_buffer; std::string plotlyplot_buffer; std::string rooplot_buffer; + + // flagged for can't be batch inserted by workers + std::vector generic_write_query_indices; + + // set by database workers after batch insert + bool alarm_batch_success; + std::vector devconfig_version_nums; + std::vector runconfig_version_nums; + std::vector calibration_version_nums; + std::vector plotlyplot_version_nums; + std::vector rootplot_version_nums; + + 
QueryBatch(size_t prealloc_size){ + queries.reserve(prealloc_size); + } + void reset(){ alarm_buffer = "["; devconfig_buffer = "["; @@ -23,7 +41,7 @@ struct QueryBatch { plotlyplot_buffer = "["; rooplot_buffer = "["; - alarm_batch_status = false; + alarm_batch_success = false; // the presence of returned version numbers is indication that these batch insertions worked devconfig_version_nums.clear(); @@ -31,28 +49,10 @@ struct QueryBatch { calibration_version_nums.clear(); plotlyplot_version_nums.clear(); rootplot_version_nums.clear(); + generic_write_query_indices.clear(); } - // set by database workers for batch submissions - bool alarm_batch_success; - - // FIXME check type returned from pqxx - std::vector devconfig_version_nums; - std::vector runconfig_version_nums; - std::vector calibration_version_nums; - std::vector plotlyplot_version_nums; - std::vector rootplot_version_nums; - -// // convert to zmq message on return path by workers -// void setsuccess(uint32_t succeeded){ -// for(ZmqQuery& q : queries) q.setsuccess(succeeded); -// } -// void setversionnums(){ -// for(size_t i=0; i - -Utilities::Utilities(zmq::context_t* zmqcontext){ - context=zmqcontext; - Threads.clear(); -} - -bool Utilities::AddService(std::string ServiceName, unsigned int port, bool StatusQuery){ - - zmq::socket_t Ireceive (*context, ZMQ_PUSH); - Ireceive.connect("inproc://ServicePublish"); - - boost::uuids::uuid m_UUID; - m_UUID = boost::uuids::random_generator()(); - - std::stringstream test; - test<<"Add "<< ServiceName <<" "< &connections, std::string port){ - - boost::uuids::uuid m_UUID=boost::uuids::random_generator()(); - long msg_id=0; - - zmq::socket_t Ireceive (*context, ZMQ_DEALER); - Ireceive.connect("inproc://ServiceDiscovery"); - - - zmq::message_t send(4); - snprintf ((char *) send.data(), 4 , "%s" ,"All") ; - - - Ireceive.send(send); - - zmq::message_t receive; - Ireceive.recv(&receive); - std::istringstream iss(static_cast(receive.data())); - - int size; - iss>>size; - 
- for(int i=0;i(servicem.data())); - service->JsonParser(ss.str()); - - std::string type; - std::string uuid; - std::string ip; - std::string remote_port; - service->Get("msg_value",type); - service->Get("uuid",uuid); - service->Get("ip",ip); - if(port=="") service->Get("remote_port",remote_port); - else remote_port=port; - std::string tmp=ip + ":" + remote_port; - - //if(type == ServiceName && connections.count(uuid)==0){ - if(type == ServiceName && connections.count(tmp)==0){ - connections[tmp]=service; - //std::string ip; - //std::string port; - //service->Get("ip",ip); - //service->Get("remote_port",port); - tmp="tcp://"+ tmp; - sock->connect(tmp.c_str()); - } - else{ - delete service; - service=0; - } - - - } - - return connections.size(); - } - -Thread_args* Utilities::CreateThread(std::string ThreadName, void (*func)(Thread_args*), Thread_args* args){ - - if(Threads.count(ThreadName)==0){ - - if(args==0) args = new Thread_args(); - - args->context=context; - args->ThreadName=ThreadName; - args->func=func; - args->running=true; - - pthread_create(&(args->thread), NULL, Utilities::Thread, args); - - args->sock=0; - Threads[ThreadName]=args; - -} - - else args=0; - - return args; - -} - - -Thread_args* Utilities::CreateThread(std::string ThreadName, void (*func)(std::string)){ - Thread_args *args =0; - - if(Threads.count(ThreadName)==0){ - - args = new Thread_args(context, ThreadName, func); - pthread_create(&(args->thread), NULL, Utilities::String_Thread, args); - args->sock=0; - args->running=true; - Threads[ThreadName]=args; - } - - return args; -} - -void *Utilities::String_Thread(void *arg){ - - - Thread_args *args = static_cast(arg); - - zmq::socket_t IThread(*(args->context), ZMQ_PAIR); - /// need to subscribe - std::stringstream tmp; - tmp<<"inproc://"<ThreadName; - IThread.bind(tmp.str().c_str()); - - - zmq::pollitem_t initems[] = { - {IThread, 0, ZMQ_POLLIN, 0}}; - - args->running = true; - - while(!args->kill){ - if(args->running){ - - std::string 
command=""; - - zmq::poll(&initems[0], 1, 0); - - if ((initems[0].revents & ZMQ_POLLIN)){ - - zmq::message_t message; - IThread.recv(&message); - command=std::string(static_cast(message.data())); - - } - - args->func_with_string(command); - } - - else usleep(100); - - } - - pthread_exit(NULL); - } - -void *Utilities::Thread(void *arg){ - - Thread_args *args = static_cast(arg); - - while (!args->kill){ - - if(args->running) args->func(args ); - else usleep(100); - - } - - pthread_exit(NULL); - -} - -bool Utilities::MessageThread(Thread_args* args, std::string Message, bool block){ - - bool ret=false; - - if(args){ - - if(!args->sock){ - - args->sock = new zmq::socket_t(*(args->context), ZMQ_PAIR); - std::stringstream tmp; - tmp<<"inproc://"<ThreadName; - args->sock->connect(tmp.str().c_str()); - - } - - zmq::message_t msg(Message.length()+1); - snprintf((char *)msg.data(), Message.length()+1, "%s", Message.c_str()); - - if(block) ret=args->sock->send(msg); - else ret=args->sock->send(msg, ZMQ_NOBLOCK); - - } - - return ret; - -} - -bool Utilities::MessageThread(std::string ThreadName, std::string Message, bool block){ - - return MessageThread(Threads[ThreadName],Message,block); -} - -bool Utilities::KillThread(Thread_args* &args){ - - bool ret=false; - - if(args){ - - args->running=false; - args->kill=true; - - pthread_join(args->thread, NULL); - //delete args; - //args=0; - - - } - - return ret; - -} - -bool Utilities::KillThread(std::string ThreadName){ - - return KillThread(Threads[ThreadName]); - -} - diff --git a/DataModel/Utilities.h b/DataModel/Utilities.h deleted file mode 100644 index 8aac354..0000000 --- a/DataModel/Utilities.h +++ /dev/null @@ -1,152 +0,0 @@ -#ifndef UTILITIES_H -#define UTILITIES_H - -#include -#include -#include -#include -#include -#include -#include -#include // generators -#include // streaming operators etc. 
- -/** - * \struct DataModelThread_args - * - * This is both an base class for any thread argument struct used in the tool threaded Tool templates. -Effectivly this acts as a place to put variable that are specfic to that thread and can be used as a place to transfer variables from the main thread to sub threads. - * - * - * $Author: B.Richards $ - * $Date: 2019/05/26 18:34:00 $ - * Contact: b.richards@qmul.ac.uk - * - */ - -struct Thread_args{ - - Thread_args(){ ///< Simple constructor - kill=false; - } - - Thread_args(zmq::context_t* contextin, std::string threadname, void (*funcin)(std::string)){ ///< Construtor for thread with string - - context=contextin; - ThreadName=threadname; - func_with_string=funcin; - kill=false; - } - - Thread_args(zmq::context_t* contextin, std::string threadname, void (*funcin)(Thread_args*)){ ///< Constrcutor for thread with args - - context=contextin; - ThreadName=threadname; - func=funcin; - kill=false; - } - - virtual ~Thread_args(){ ///< virtual constructor - running =false; - kill=true; - delete sock; - sock=0; - } - - zmq::context_t *context; ///< ZMQ context used for ZMQ socket creation - std::string ThreadName; ///< name of thread (deffined at creation) - void (*func_with_string)(std::string); ///< function pointer to string thread - void (*func)(Thread_args*); ///< function pointer to thread with args - pthread_t thread; ///< Simple constructor underlying thread that interface is built ontop of - zmq::socket_t* sock; ///< ZMQ socket pointer is assigned in string thread,but can be sued otherwise - bool running; ///< Bool flag to tell the thread to run (if not set thread goes into wait cycle - bool kill; ///< Bool flay used to kill the thread - -}; - - -/** - * \class Utilities - * - * This class can be instansiated in a Tool and provides some helpful threading, dynamic socket descovery and promotion functionality - * - * - * $Author: B.Richards $ - * $Date: 2019/05/26 18:34:00 $ - * Contact: b.richards@qmul.ac.uk - * - */ - 
-class Utilities{ - - public: - - Utilities(zmq::context_t* zmqcontext); ///< Simple constructor - bool AddService(std::string ServiceName, unsigned int port, bool StatusQuery=false); ///< Broadcasts an available service (only in remote mode) - bool RemoveService(std::string ServiceName); ///< Removes service broadcasts for a service - int UpdateConnections(std::string ServiceName, zmq::socket_t* sock, std::map &connections, std::string port=""); ///< Dynamically connects a socket tp services broadcast with a specific name - Thread_args* CreateThread(std::string ThreadName, void (*func)(std::string)); //func = &my_int_func; ///< Create a simple thread that has string exchange with main thread - Thread_args* CreateThread(std::string ThreadName, void (*func)(Thread_args*), Thread_args* args); ///< Create a thread with more complicated data exchange definned by arguments - bool MessageThread(Thread_args* args, std::string Message, bool block=true); ///< Send simple string to String thread - bool MessageThread(std::string ThreadName, std::string Message, bool block=true); ///< Send simple string to String thread - bool KillThread(Thread_args* &args); ///< Kill a thread assosiated to args - bool KillThread(std::string ThreadName); ///< Kill a thread by name - - template bool KillThread(T* pointer){ - - Thread_args* tmp=pointer; - return KillThread(tmp); - - } ///< Kill a thread with args that inheirt form base Thread_args - - template bool SendPointer(zmq::socket_t* sock, T* pointer){ - - std::stringstream tmp; - tmp<send(message); - - } ///< Send a pointer over a ZMQ socket - - template bool ReceivePointer(zmq::socket_t* sock, T*& pointer){ - - zmq::message_t message; - - if(sock->recv(&message)){ - - std::istringstream iss(static_cast(message.data())); - - // long long unsigned int tmpP; - unsigned long tmpP; - iss>>std::hex>>tmpP; - - pointer=reinterpret_cast(tmpP); - - return true; - } - - else { - pointer=0; - return false; - } - - } ///< Receive a pointer over a 
ZMQ socket - - - - - private: - - zmq::context_t *context; ///< ZMQ context pointer - static void* String_Thread(void *arg); ///< Simpe string thread - static void* Thread(void *arg); ///< Thread with args - std::map Threads; ///< Map of threads managed by the utilities class. - - -}; - - -#endif diff --git a/DataModel/ZmqQuery.h b/DataModel/ZmqQuery.h index 255d0a2..81b3789 100644 --- a/DataModel/ZmqQuery.h +++ b/DataModel/ZmqQuery.h @@ -1,6 +1,8 @@ #ifndef ZMQ_QUERY_H #define ZMQ_QUERY_H -#include + +#include +#include struct ZmqQuery { @@ -18,7 +20,7 @@ struct ZmqQuery { ZmqQuery& operator=(ZmqQuery&& c) = default; // 4 parts for receiving, for sending 3+ parts - std::vector parts(4); + std::vector parts{4}; size_t size() const { return parts.size(); } @@ -32,23 +34,23 @@ struct ZmqQuery { } // received and returned std::string_view client_id(){ - return std::string_view{parts[0].data(),parts[0].data().size()}; + return std::string_view{(const char*)parts[0].data(),parts[0].size()}; } uint32_t msg_id(){ return *reinterpret_cast(parts[1].data()); } // received only std::string_view topic(){ - return std::string_view{parts[2].data(),parts[2].data().size()}; + return std::string_view{(const char*)parts[2].data(),parts[2].size()}; } std::string_view msg(){ - return std::string_view{parts[3].data(),parts[3].data().size()}; + return std::string_view{(const char*)parts[3].data(),parts[3].size()}; } // for setting success void setsuccess(uint32_t succeeded){ - //parts[2]=new(parts[2]) zmq::message_t(sizeof(uint32_t)); // uhh, is there a better way to call zmq_msg_init_size? - zmq_msg_init_size(&parts[2],sizeof(uint32_t)); // this is from underlying c api... FIXME? + //zmq_msg_init_size(&parts[2],sizeof(uint32_t)); // this is from underlying c api... mismatch zmq_msg_t* / zmq::message_t + new(&parts[2]) zmq::message_t(sizeof(uint32_t)); // FIXME is there a better way to call zmq_msg_init_size? 
memcpy((void*)parts[2].data(),&succeeded,sizeof(uint32_t)); return; } @@ -61,12 +63,24 @@ struct ZmqQuery { parts.resize(3+n_rows); return; } - void setresponse(size_t row_num, std::string_view row){ - zmq_msg_init_size(&parts[row_num+3],row.size()); // this is from underlying c api... FIXME? - memcpy((void*)parts[row_num+3].data(),row.data(),row.size()); + + void setresponse(size_t row_num, std::string_view val){ + //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t + new(&parts[row_num+3]) zmq::message_t(val.size()); // FIXME better way to call zmq_msg_init_size + memcpy((void*)parts[row_num+3].data(),val.data(),val.size()); + return; + } + + template + typename std::enable_if::value, void>::type + setresponse(size_t row_num, T val){ + //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t + new(&parts[row_num+3]) zmq::message_t(sizeof(val)); // FIXME better way to call zmq_msg_init_size + memcpy((void*)parts[row_num+3].data(),&val,sizeof(val)); return; } -} + +}; #endif diff --git a/DataModel/query_topics.h b/DataModel/query_topics.h new file mode 100644 index 0000000..8b94ffb --- /dev/null +++ b/DataModel/query_topics.h @@ -0,0 +1,9 @@ +#ifndef QUERY_TYPES_H +#define QUERY_TYPES_H + +// used by MulticastWorkers and DatabaseWorkers +// only write query topics +enum class query_topic : char { alarm='A', dev_config='D', run_config='R', calibration='C', logging='L', monitoring='M', rootplot='T', plotlyplot='P', generic='Q' }; + +#endif + diff --git a/DataModel/query_typedefs.h b/DataModel/query_typedefs.h deleted file mode 100644 index 5117b41..0000000 --- a/DataModel/query_typedefs.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef QUERY_TYPES_H -#define QUERY_TYPES_H - -// used by MulticastWorkers and DatabaseWorkers -// only write query topics -enum class query_topic char { alarm='A', dev_config='D', run_config='R', calibration='C', logging='L', monitoring='M', rootplot='T', plotlyplot='P', 
generic='Q' }; - -#endif - diff --git a/Makefile b/Makefile index 8968bed..bc379ac 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ ToolFrameworkCore=$(Dependencies)/ToolFrameworkCore ToolDAQFramework=$(Dependencies)/ToolDAQFramework SOURCEDIR=`pwd` -CXXFLAGS= -fPIC -std=c++11 -Wno-comment # -Wpedantic -Wall -Wno-unused -Wextra -Wcast-align -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-declarations -Wmissing-include-dirs -Wnoexcept -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-conversion -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wswitch-default -Wundef #-Werror -Wold-style-cast +CXXFLAGS= -fmax-errors=3 -fPIC -std=c++20 -Wno-comment -Werror=array-bounds -Werror=return-type # -Wpedantic -Wall -Wno-unused -Wextra -Wcast-align -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wlogical-op -Wmissing-declarations -Wmissing-include-dirs -Wnoexcept -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-conversion -Wsign-promo -Wstrict-null-sentinel -Wstrict-overflow=5 -Wswitch-default -Wundef #-Werror -Wold-style-cast ifeq ($(MAKECMDGOALS),debug) @@ -18,13 +18,18 @@ DataModelLib = MyToolsInclude = MyToolsLib = -ZMQLib= -L $(Dependencies)/zeromq-4.0.7/lib -lzmq -ZMQInclude= -I $(Dependencies)/zeromq-4.0.7/include/ +ZMQLib= -L $(Dependencies)/zeromq-4.0.7/lib -lzmq +ZMQInclude= -I $(Dependencies)/zeromq-4.0.7/include/ BoostLib= -L $(Dependencies)/boost_1_66_0/install/lib -lboost_date_time -lboost_serialization -lboost_iostreams BoostInclude= -I $(Dependencies)/boost_1_66_0/install/include -Includes=-I $(ToolFrameworkCore)/include/ -I $(ToolDAQFramework)/include/ -I $(SOURCEDIR)/include/ $(ZMQInclude) $(BoostInclude) +#PostgresLib= -L $(Dependencies)/libpqxx-6.4.5/install/lib -lpqxx -L `pg_config --libdir` -lpq +#PostgresInclude= -I $(Dependencies)/libpqxx-6.4.5/install/include -I `pg_config --includedir` +PostgresLib= -L $(Dependencies)/libpqxx-7.10.4/install/lib 
-lpqxx -L `pg_config --libdir` -lpq +PostgresInclude= -I $(Dependencies)/libpqxx-7.10.4/install/include -I `pg_config --includedir` + +Includes=-I $(ToolFrameworkCore)/include/ -I $(ToolDAQFramework)/include/ -I $(SOURCEDIR)/include/ $(ZMQInclude) $(BoostInclude) $(PostgresInclude) ToolLibraries = $(patsubst %, lib/%, $(filter lib%, $(subst /, , $(wildcard UserTools/*/*.so)))) LIBRARIES=lib/libDataModel.so lib/libMyTools.so $(ToolLibraries) DataModelHEADERS:=$(patsubst %.h, include/%.h, $(filter %.h, $(subst /, ,$(wildcard DataModel/*.h)))) @@ -32,7 +37,7 @@ MyToolHEADERS:=$(patsubst %.h, include/%.h, $(filter %.h, $(subst /, ,$(wildcard ToolLibs = $(patsubst %.so, %, $(patsubst lib%, -l%,$(filter lib%, $(subst /, , $(wildcard UserTools/*/*.so))))) AlreadyCompiled = $(wildcard UserTools/$(filter-out %.so UserTools , $(subst /, ,$(wildcard UserTools/*/*.so)))/*.cpp) SOURCEFILES:=$(patsubst %.cpp, %.o, $(filter-out $(AlreadyCompiled), $(wildcard src/*.cpp) $(wildcard UserTools/*/*.cpp) $(wildcard DataModel/*.cpp))) -Libs=-L $(SOURCEDIR)/lib/ -lDataModel -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore -L $(ToolFrameworkCore)/lib/ -lToolChain -lMyTools -lDataModelBase -lLogging -lStore -lpthread $(ToolLibs) -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore $(ZMQLib) $(BoostLib) +Libs=-L $(SOURCEDIR)/lib/ -lDataModel -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore -L $(ToolFrameworkCore)/lib/ -lToolChain -lMyTools -lDataModelBase -lLogging -lStore -lpthread $(ToolLibs) -L $(ToolDAQFramework)/lib/ -lToolDAQChain -lDAQDataModelBase -lDAQLogging -lServiceDiscovery -lDAQStore $(ZMQLib) $(BoostLib) $(PostgresLib) #.SECONDARY: $(%.o) @@ -43,13 +48,13 @@ debug: all main: src/main.o $(LIBRARIES) $(DataModelHEADERS) $(MyToolHEADERS) | $(SOURCEFILES) @echo -e "\e[38;5;11m\n*************** Making " $@ " 
****************\e[0m" - g++ $(CXXFLAGS) $< -o $@ $(Includes) $(Libs) $(DataModelInclude) $(DataModelLib) $(MyToolsInclude) $(MyToolsLib) + g++ $(CXXFLAGS) $< -o $@ $(Includes) $(Libs) $(DataModelInclude) $(DataModelLib) $(MyToolsInclude) $(MyToolsLib) include/%.h: @echo -e "\e[38;5;87m\n*************** sym linking headers ****************\e[0m" ln -s `pwd`/$(filter %$(strip $(patsubst include/%.h, /%.h, $@)), $(wildcard DataModel/*.h) $(wildcard UserTools/*/*.h) $(wildcard UserTools/*.h)) $@ -src/%.o : src/%.cpp +src/%.o : src/%.cpp @echo -e "\e[38;5;214m\n*************** Making " $@ "****************\e[0m" g++ $(CXXFLAGS) -c $< -o $@ $(Includes) diff --git a/Setup.sh b/Setup.sh index 563712e..bce1648 100755 --- a/Setup.sh +++ b/Setup.sh @@ -6,6 +6,6 @@ Dependencies=/opt #source ${ToolDAQapp}/ToolDAQ/root/bin/thisroot.sh -export LD_LIBRARY_PATH=`pwd`/lib:${Dependencies}/zeromq-4.0.7/lib:${Dependencies}/boost_1_66_0/install/lib:${Dependencies}/ToolFrameworkCore/lib:${Dependencies}/ToolDAQFramework/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=`pwd`/lib:${Dependencies}/zeromq-4.0.7/lib:${Dependencies}/boost_1_66_0/install/lib:${Dependencies}/libpqxx-7.10.4/install/lib:${Dependencies}/ToolFrameworkCore/lib:${Dependencies}/ToolDAQFramework/lib:$LD_LIBRARY_PATH export SEGFAULT_SIGNALS="all" diff --git a/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h b/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h new file mode 100644 index 0000000..4e589a1 --- /dev/null +++ b/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h @@ -0,0 +1,66 @@ +#ifndef DatabaseWorkerMonitoring_H +#define DatabaseWorkerMonitoring_H + +#include "MonitoringVariables.h" + +class DatabaseWorkerMonitoring : public MonitoringVariables { + public: + DatabaseWorkerMonitoring(){}; + ~DatabaseWorkerMonitoring(){}; + + std::atomic logging_submissions; + std::atomic logging_submissions_failed; + std::atomic monitoring_submissions; + std::atomic monitoring_submissions_failed; + std::atomic 
rootplot_submissions; + std::atomic rootplot_submissions_failed; + std::atomic plotlyplot_submissions; + std::atomic plotlyplot_submissions_failed; + std::atomic alarm_submissions; + std::atomic alarm_submissions_failed; + std::atomic devconfig_submissions; + std::atomic devconfig_submissions_failed; + std::atomic runconfig_submissions; + std::atomic runconfig_submissions_failed; + std::atomic calibration_submissions; + std::atomic calibration_submissions_failed; + std::atomic genericwrite_submissions; + std::atomic genericwrite_submissions_failed; + std::atomic readquery_submissions; + std::atomic readquery_submissions_failed; + std::atomic jobs_completed; + std::atomic jobs_failed; + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"logging_submissions\":"+std::to_string(logging_submissions.load()) + +",\"logging_submissions_failed\":"+std::to_string(logging_submissions_failed.load()) + +",\"monitoring_submissions\":"+std::to_string(monitoring_submissions.load()) + +",\"monitoring_submissions_failed\":"+std::to_string(monitoring_submissions_failed.load()) + +",\"rootplot_submissions\":"+std::to_string(rootplot_submissions.load()) + +",\"rootplot_submissions_failed\":"+std::to_string(rootplot_submissions_failed.load()) + +",\"plotlyplot_submissions\":"+std::to_string(plotlyplot_submissions.load()) + +",\"plotlyplot_submissions_failed\":"+std::to_string(plotlyplot_submissions_failed.load()) + +",\"alarm_submissions\":"+std::to_string(alarm_submissions.load()) + +",\"alarm_submissions_failed\":"+std::to_string(alarm_submissions_failed.load()) + +",\"devconfig_submissions\":"+std::to_string(devconfig_submissions.load()) + +",\"devconfig_submissions_failed\":"+std::to_string(devconfig_submissions_failed.load()) + +",\"runconfig_submissions\":"+std::to_string(runconfig_submissions.load()) + 
+",\"runconfig_submissions_failed\":"+std::to_string(runconfig_submissions_failed.load()) + +",\"calibration_submissions\":"+std::to_string(calibration_submissions.load()) + +",\"calibration_submissions_failed\":"+std::to_string(calibration_submissions_failed.load()) + +",\"genericwrite_submissions\":"+std::to_string(genericwrite_submissions.load()) + +",\"genericwrite_submissions_failed\":"+std::to_string(genericwrite_submissions_failed.load()) + +",\"readquery_submissions\":"+std::to_string(readquery_submissions.load()) + +",\"readquery_submissions_failed\":"+std::to_string(readquery_submissions_failed.load()) + +",\"jobs_failed\":"+std::to_string(jobs_failed.load()) + +",\"jobs_completed\":"+std::to_string(jobs_completed.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index 14ad3c1..b324d83 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -1,25 +1,28 @@ #include "DatabaseWorkers.h" -#include "GenericFunctions.h" +#include +#include +//#include DatabaseWorkers::DatabaseWorkers():Tool(){} +std::string DatabaseWorkers::connection_string=""; bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; + /* ----------------------------------------- */ + /* Configuration */ + /* ----------------------------------------- */ - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; - - // ########################################################################## - // default initialize variables - // ########################################################################## + m_verbose=1; std::string dbhostname = 
"/tmp"; // '/tmp' = local unix socket std::string dbhostaddr = ""; // fallback if hostname is empty, an ip address int dbport = 5432; // database port + std::string dbname = "daq"; // database name std::string dbuser = ""; // database user to connect as. defaults to PGUSER env var if empty. std::string dbpasswd = ""; // database password. defaults to PGPASS or PGPASSFILE if not given. @@ -29,22 +32,43 @@ bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ // with the pg_ident.conf file in postgres database. in such a case dbuser and dbpasswd // should be left empty - // ########################################################################## - // # Update with user-specified values. - // ########################################################################## - // TODO make these config var keys specific for each db. + m_variables.Get("verbose",m_verbose); m_variables.Get("hostname",dbhostname); m_variables.Get("hostaddr",dbhostaddr); - m_variables.Get("hostaddr",dbname); + m_variables.Get("dbname",dbname); m_variables.Get("port",dbport); m_variables.Get("user",dbuser); m_variables.Get("passwd",dbpasswd); + // number of database workers - FIXME needs to match concurrency of postgres backend + max_workers = 10; + m_variables.Get("max_workers", max_workers); + + ExportConfiguration(); + + /* ----------------------------------------- */ + /* Thread Setup */ + /* ----------------------------------------- */ - // ########################################################################## - // # Open connection - // ########################################################################## + // monitoring struct to encapsulate tracking info + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + + // we *do* need a unique worker pool here because these workers + // maintain a connection to the database, so are a 'limited resource' + job_manager = new 
WorkerPoolManager(database_jobqueue, &max_workers, &(m_data->thread_cap), &(m_data->num_threads), nullptr, true); + + thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; + thread_args.job_queue = &database_jobqueue; + m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args); + m_data->num_threads++; + + /* ----------------------------------------- */ + /* DB Test */ + /* ----------------------------------------- */ // pass connection details to the postgres interface class + std::stringstream tmp; if(dbhostname!="") tmp<<" host="<thread_cap), &(m_data->num_threads), nullptr, true); - - thread_args.m_data = m_data; - thread_args.job_queue = &database_jobqueue; - m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args); - m_data->num_threads++; - return true; } @@ -98,11 +113,11 @@ bool DatabaseWorkers::Execute(){ // FIXME ok but actually this kills all our jobs, not just our job distributor // so we don't want to do that. - if(!thread_args->running){ + if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? - ++m_data->database_job_distributor_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++(monitoring_vars.thread_crashes); } return true; @@ -122,6 +137,10 @@ bool DatabaseWorkers::Finalise(){ delete job_manager; job_manager = nullptr; m_data->num_threads--; + + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + Log(m_tool_name+": Finished",v_warning); return true; @@ -129,35 +148,41 @@ bool DatabaseWorkers::Finalise(){ // ««-------------- ≪ °◇◆◇° ≫ --------------»» -void DatabaseWorkers::Thread(Thread_args* arg){ - - m_args = dynamic_cast(arg); - - // add a new Job to the job queue to process this data - Job* the_job = m_data->job_pool.GetNew(&m_data->job_pool, "database_worker"); - if(the_job->data == nullptr){ - // on first creation of the job, make it a JobStruct to encapsulate its data - // N.B. Pool::GetNew will only invoke the constructor if this is a new instance, - // (not if it's been used before and then returned to the pool) - // so don't pass job-specific variables to the constructor - the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); - } else { - // FIXME error - std::cerr<<"database_worker Job with non-null data pointer!"<func = DatabaseJob; - the_job->fail_func = DatabaseJobFail; + DatabaseJobDistributor_args* m_args = dynamic_cast(args); - DatabaseJobStruct* job_data = dynamic_cast(the_job->data); - job_data->connection_string = connection_string; + // get a new Job to the job queue to process this data + if(m_args->the_job==nullptr){ + m_args->the_job = m_args->m_data->job_pool.GetNew("database_worker"); + m_args->the_job->out_pool = &m_args->m_data->job_pool; + + if(m_args->the_job->data == nullptr){ + // on first creation of the job, make it a JobStruct to encapsulate its data + // N.B. 
Pool::GetNew will only invoke the constructor if this is a new instance, + // (not if it's been used before and then returned to the pool) + // so don't pass job-specific variables to the constructor + m_args->the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data, m_args->monitoring_vars); + } else { + // FIXME error + std::cerr<<"database_worker Job with non-null data pointer!"<the_job->func = DatabaseJob; + m_args->the_job->fail_func = DatabaseJobFail; + + // FIXME this could leak the_job if the toolchain ends... gonna ignore that, i dunno how to handle it. + } + + DatabaseJobStruct* job_data = static_cast(m_args->the_job->data); + job_data->clear(); // XXX ok we have flexibility here on how much we want each worker to grab // the more we do in one transaction (one job) the better throughput... // but with possibly greater latency on replies // grab logging queries - locker = std::unique_lock(m_args->m_data->log_query_queue_mtx); + std::unique_lock locker(m_args->m_data->log_query_queue_mtx); if(!m_args->m_data->log_query_queue.empty()){ std::swap(m_args->m_data->log_query_queue, job_data->logging_queue); } @@ -168,6 +193,17 @@ void DatabaseWorkers::Thread(Thread_args* arg){ std::swap(m_args->m_data->mon_query_queue, job_data->monitoring_queue); } + // if rootplot queries go over multicast, grab those + locker = std::unique_lock(m_args->m_data->rootplot_query_queue_mtx); + if(!m_args->m_data->rootplot_query_queue.empty()){ + std::swap(m_args->m_data->rootplot_query_queue, job_data->rootplot_queue); + } + + // if plotlyplot queries go over multicast, grab those + locker = std::unique_lock(m_args->m_data->plotlyplot_query_queue_mtx); + if(!m_args->m_data->plotlyplot_query_queue.empty()){ + std::swap(m_args->m_data->plotlyplot_query_queue, job_data->plotlyplot_queue); + // grab write queries locker = std::unique_lock(m_args->m_data->write_query_queue_mtx); if(!m_args->m_data->write_query_queue.empty()){ @@ -175,36 +211,38 @@ void 
DatabaseWorkers::Thread(Thread_args* arg){ } // grab read queries - std::unique_lock locker(m_args->m_data->read_msg_queue_mtx); + locker = std::unique_lock(m_args->m_data->read_msg_queue_mtx); if(!m_args->m_data->read_msg_queue.empty()){ std::swap(m_args->m_data->read_msg_queue, job_data->read_queue); } - - // if they go over multicast - // grab rootplot queries - locker = std::unique_lock(m_args->m_data->rootplot_query_queue_mtx); - if(!m_args->m_data->rootplot_query_queue.empty()){ - std::swap(m_args->m_data->rootplot_query_queue, job_data->rootplot_queue); - } - - // grab plotlyplot queries - locker = std::unique_lock(m_args->m_data->plotlyplot_query_queue_mtx); - if(!m_args->m_data->plotlyplot_query_queue.empty()){ - std::swap(m_args->m_data->plotlyplot_query_queue, job_data->plotlyplot_queue); } locker.unlock(); - /*ok =*/ m_args->job_queue.AddJob(the_job); // just checks if you've defined func and first_vals = true; + // check if the job had something to do + if(job_data->logging_queue.empty() && + job_data->monitoring_queue.empty() && + job_data->rootplot_queue.empty() && + job_data->plotlyplot_queue.empty() && + job_data->write_queue.empty() && + job_data->read_queue.empty()) return; + + job_data->m_job_name = "database_worker"; + + m_args->job_queue->AddJob(m_args->the_job); + m_args->the_job = nullptr; + + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» -void WriteWorkers::DatabaseJobFail(void*& arg){ +void DatabaseWorkers::DatabaseJobFail(void*& arg){ // safety check in case the job somehow fails after returning its args to the pool if(arg==nullptr){ + std::cerr<<"multicast worker fail with no args"<(arg); - ++(*m_args->m_data->db_worker_job_fails); + DatabaseJobStruct* m_args=static_cast(arg); + std::cerr<m_job_name<<" failure"<monitoring_vars->jobs_failed); // return our job args to the pool - m_args->m_pool.Add(m_args); + m_args->m_pool->Add(m_args); m_args = nullptr; // clear the local m_args variable... 
not strictly necessary arg = nullptr; // clear the job 'data' member variable + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» -void DatabaseWorkers::DatabaseJob(void*& arg){ +bool DatabaseWorkers::DatabaseJob(void*& arg){ - DatabaseJobStruct* m_args = dynamic_cast(arg); + DatabaseJobStruct* m_args = static_cast(arg); // the worker will need a connection to the database - thread_local pqxx::connection* conn; + thread_local std::unique_ptr conn; if(conn==nullptr){ - conn = new pqxx::connection(m_args->connection_string); + conn.reset(new pqxx::connection(DatabaseWorkers::connection_string)); if(!conn){ - Log("Failed to open connection to database for worker thread!",v_error); // FIXME logging + //Log("Failed to open connection to database for worker thread!",v_error); // FIXME logging // FIXME terminate this worker... m_args->running=false? - return; + return false; } else { // set up prepared statements. These are, sadly, a property of the connection // logging insert - conn.prepare("logging_insert", "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, severity int, message text)"); + conn->prepare("logging_insert", "INSERT INTO logging ( time, device, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, severity int, message text)"); // monitoring insert - conn.prepare("monitoring_insert", "INSERT INTO monitoring ( time, device, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, subject text, data jsonb)"); + conn->prepare("monitoring_insert", "INSERT INTO monitoring ( time, device, subject, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, subject text, data jsonb)"); // alarms insert - conn.prepare("alarms_insert", "INSERT INTO alarms ( time, device, level, alarm ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time 
timestamptz, device text, level int, alarm text)"); + conn->prepare("alarms_insert", "INSERT INTO alarms ( time, device, level, alarm ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, level int, alarm text)"); // rootplot insert - conn.prepare("rootplots_insert", "INSERT INTO rootplots ( time, name, data, draw_options ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, draw_options text)"); + conn->prepare("rootplots_insert", "INSERT INTO rootplots ( time, name, data, draw_options ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, draw_options text)"); // plotlyplot insert - conn.prepare("plotlyplots_insert", "INSERT INTO plotlyplots ( time, name, data, layout ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, layout jsonb)"); + conn->prepare("plotlyplots_insert", "INSERT INTO plotlyplots ( time, name, data, layout ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, layout jsonb)"); // calibration insert - conn.prepare("calibration_insert", "INSERT INTO calibration ( time, name, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, description text, data jsonb)"); + conn->prepare("calibration_insert", "INSERT INTO calibration ( time, name, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, description text, data jsonb)"); // device config insert - conn.prepare("device_config_insert", "INSERT INTO device_config ( time, device, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, author text, description text, data jsonb)"); + conn->prepare("device_config_insert", "INSERT INTO device_config ( time, device, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, 
author text, description text, data jsonb)"); // run config insert - conn.prepare("run_config_insert", "INSERT INTO run_config ( time, name, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, author text, description text, data jsonb)"); + conn->prepare("run_config_insert", "INSERT INTO run_config ( time, name, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, author text, description text, data jsonb)"); + } } + // FIXME if the DB goes down, implement some sort of pausing(?) or local recording to local disk (SQLite?) + // we also use a single transaction for all queries, so open that now - pqxx::work tx(conn); // aka pqxx::transaction<> + pqxx::work tx(*conn.get()); // aka pqxx::transaction<> // insert new logging statements try { tx.exec(pqxx::prepped{"logging_insert"}, pqxx::params{m_args->logging_queue}); - ++(*m_args->m_data->n_logging_submissions); + ++(m_args->monitoring_vars->logging_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_logging_submissions_failed); + ++(m_args->monitoring_vars->logging_submissions_failed); // FIXME log the error here + // FIXME if we catch (pqxx::sql_error const &e) or others can we get better information? 
} // insert new monitoring statements try { tx.exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{m_args->monitoring_queue}); - ++(*m_args->m_data->n_monitoring_submissions); + ++(m_args->monitoring_vars->monitoring_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_monitoring_submissions_failed); + ++(m_args->monitoring_vars->monitoring_submissions_failed); // FIXME log the error here } // insert new multicast rootplot statements try { tx.exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{m_args->rootplot_queue}); - ++(*m_args->m_data->n_rootplot_submissions); + ++(m_args->monitoring_vars->rootplot_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_rootplot_submissions_failed); + ++(m_args->monitoring_vars->rootplot_submissions_failed); // FIXME log the error here } // insert new multicast plotlyplot statements try { tx.exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{m_args->plotlyplot_queue}); - ++(*m_args->m_data->n_plotlyplot_submissions); + ++(m_args->monitoring_vars->plotlyplot_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_plotlyplot_submissions_failed); + ++(m_args->monitoring_vars->plotlyplot_submissions_failed); // FIXME log the error here } @@ -317,10 +361,10 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ try { tx.exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); batch->alarm_batch_success = true; - ++(*m_args->m_data->n_alarm_submissions); + ++(m_args->monitoring_vars->alarm_submissions); } catch (std::exception& e){ batch->alarm_batch_success = false; - ++(*m_args->m_data->n_alarm_submissions_failed); + ++(m_args->monitoring_vars->alarm_submissions_failed); // FIXME log the error here } @@ -336,9 +380,9 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ [&batch](int32_t new_version_num){ batch->devconfig_version_nums.push_back(new_version_num); }, pqxx::params{batch->devconfig_buffer}); - ++(*m_args->m_data->n_devconfig_submissions); + 
++(m_args->monitoring_vars->devconfig_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_devconfig_submissions_failed); + ++(m_args->monitoring_vars->devconfig_submissions_failed); // FIXME log the error here } @@ -348,9 +392,9 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ [&batch](int32_t new_version_num){ batch->runconfig_version_nums.push_back(new_version_num); }, pqxx::params{batch->runconfig_buffer}); - ++(*m_args->m_data->n_runconfig_submissions); + ++(m_args->monitoring_vars->runconfig_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_runconfig_submissions_failed); + ++(m_args->monitoring_vars->runconfig_submissions_failed); // FIXME log the error here } @@ -360,9 +404,9 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ [&batch](int32_t new_version_num){ batch->calibration_version_nums.push_back(new_version_num); }, pqxx::params{batch->calibration_buffer}); - ++(*m_args->m_data->n_calibration_submissions); + ++(m_args->monitoring_vars->calibration_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_calibration_submissions_failed); + ++(m_args->monitoring_vars->calibration_submissions_failed); // FIXME log the error here } @@ -372,9 +416,9 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ [&batch](int32_t new_version_num){ batch->rootplot_version_nums.push_back(new_version_num); }, pqxx::params{batch->rooplot_buffer}); - ++(*m_args->m_data->n_rootplot_submissions); + ++(m_args->monitoring_vars->rootplot_submissions); } catch (std::exception& e){ - ++(*m_args->m_data->n_rootplot_submissions_failed); + ++(m_args->monitoring_vars->rootplot_submissions_failed); // FIXME log the error here } @@ -384,11 +428,27 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ [&batch](int32_t new_version_num){ batch->plotlyplot_version_nums.push_back(new_version_num); }, pqxx::params{batch->plotlyplot_buffer}); - ++(*m_args->m_data->n_plotlyplot_submissions); + ++(m_args->monitoring_vars->plotlyplot_submissions); } catch 
(std::exception& e){ - ++(*m_args->m_data->n_plotlyplot_submissions_failed); + ++(m_args->monitoring_vars->plotlyplot_submissions_failed); // FIXME log the error here } + + // generic query insertions + // we can't batch these as they're just arbitrary SQL from the user, + // so we need to loop over them. + // FIXME no performance optimisation here: don't expect there to be many... right? + // if there's a lot we could use a pipeline as below...but the overhead may not be worth it + for(size_t i : batch->generic_write_query_indices){ + ZmqQuery& query = batch->queries[i]; + try { + query.result = tx.exec(query.msg()); + ++(m_args->monitoring_vars->genericwrite_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->genericwrite_submissions_failed); + // FIXME log the error here + } + } } // read queries @@ -411,7 +471,7 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ // insert all the queries for(ZmqQuery& query : batch->queries){ - px.insert(query.msg); // returns a unique query_id (aka long) + px.insert(query.msg()); // returns a unique query_id (aka long) } // and then get the results @@ -419,18 +479,24 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ try { query.result.clear(); // should be redundant...but in case of error in ResultWorkers if(px.empty()){ - // we should never find the pipeline empty! + // we should never find the pipeline empty! ... i think? + // not sure if this may happen if we check too soon?? (i.e. no results ready *yet*?) FIXME?? // we call retreive once for each insert, somehow we've got out of sync!! // FIXME log error, somehow we need to undo this mess. // maybe it's best we do keep those query_ids after all...? + throw pqxx::failure{"empty pipeline"}; // or something..? + } else { + query.result = px.retrieve().second; + // technically this returns a pair of {query_id, result} + // TODO for safety we could ensure the id's match... 
+ ++(m_args->monitoring_vars->readquery_submissions); + // FIXME technically we should decrement this if we throw anywhere as the whole lot gets rolled back? } - query.result = px.retrieve().second; - // technically this returns a pair of {query_id, result} - // TODO for safety we could ensure the id's match... - ++(*m_args->m_data->n_readquery_submissions); - // FIXME technically we should decrement this if we throw anywhere as the whole lot gets rolled back? } catch (std::exception& e){ - ++(*m_args->m_data->n_readquery_submissions_failed); + ++(m_args->monitoring_vars->readquery_submissions_failed); + // how do we encapsulate this error in the pqxx::result class? + // if the query returns no rows, does result.empty() return the same as if it has no result? + query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check... } } @@ -466,15 +532,16 @@ void DatabaseWorkers::DatabaseJob(void*& arg){ m_args->read_queue.begin(),m_args->read_queue.end()); locker.unlock(); - ++(*m_args->m_data->db_worker_job_successes); + std::cerr<m_job_name<<" completed"<monitoring_vars->jobs_completed); // return our job args to the pool - m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args->m_pool->Add(m_args); // return our job args to the job args struct pool m_args = nullptr; // clear the local m_args variable... 
not strictly necessary arg = nullptr; // clear the job 'data' member variable - return; + return true; } diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.h b/UserTools/DatabaseWorkers/DatabaseWorkers.h index 26c58ef..9d9fd14 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.h +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.h @@ -6,7 +6,7 @@ #include "Tool.h" #include "DataModel.h" #include "WorkerPoolManager.h" - +#include "DatabaseWorkerMonitoring.h" /** * \class DatabaseWorkers @@ -18,21 +18,13 @@ * Contact: marcus.o-flaherty@warwick.ac.uk */ -struct DatabaseJobDistributor_args { - DataModel* m_data; - Postgres* m_database; - Pool job_struct_pool; - -}; - struct DatabaseJobStruct { - DatabaseJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; + DatabaseJobStruct(Pool* pool, DataModel* data, DatabaseWorkerMonitoring* mon) : m_pool(pool), m_data(data), monitoring_vars(mon){}; DataModel* m_data; + DatabaseWorkerMonitoring* monitoring_vars; Pool* m_pool; - - std::string connection_string; - std::vector*> local_multicast_queue; + std::string m_job_name; std::vector read_queue; std::vector write_queue; @@ -41,6 +33,24 @@ struct DatabaseJobStruct { std::vector rootplot_queue; std::vector plotlyplot_queue; + void clear(){ + read_queue.clear(); + write_queue.clear(); + logging_queue.clear(); + monitoring_queue.clear(); + rootplot_queue.clear(); + plotlyplot_queue.clear(); + } + +}; + +struct DatabaseJobDistributor_args : Thread_args { + DataModel* m_data; + DatabaseWorkerMonitoring* monitoring_vars; + Pool job_struct_pool; + JobQueue* job_queue; + Job* the_job = nullptr; + }; class DatabaseWorkers: public Tool { @@ -54,14 +64,17 @@ class DatabaseWorkers: public Tool { private: static void Thread(Thread_args* args); DatabaseJobDistributor_args thread_args; + DatabaseWorkerMonitoring monitoring_vars; WorkerPoolManager* job_manager=nullptr; ///< manager for worker farm, has internal background thread that spawns new jobs and or prunes them, along 
with tracking statistics JobQueue database_jobqueue; ///< job queue for worker farm - static void DatabaseJob(void*& arg); + unsigned int max_workers; // for some reason workerpoolmanager only takes a pointer to this, not a copy + static std::string connection_string; + + static bool DatabaseJob(void*& arg); static void DatabaseJobFail(void*& args); }; - #endif diff --git a/UserTools/DummyTool/DummyTool.o b/UserTools/DummyTool/DummyTool.o deleted file mode 100644 index a1ad179196b6b1f1b9f0b3d9bade20f9fb150391..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52112 zcmd6Q34B!5_5VwTfryw1iZv?A0E2=`%mk3cHIW3~=tNT%cRLBmK%&`9CJ-tJn29nD zQC#|yR@xu7xU{t`wbborH42K_4Q*|yb)nXU5i7V9clmwKU1rX_nJWSP{rkTkUfz4( zckVgoo_p@O+ndMIs^Ik8oE%H19P4x|b*E9wD)^t&b-B1Kx6ZUG1nt4Nr#y$?7=hzZ z9N)lk7>+_5^z0}cN8=cU!;6Eku{eC{dK}KjsOw{K9m6<)**WfF+`)Ue;u?AnU9nZ7Np7liYKyAl@1lR?1 zQI3neC(8>2T54}D&m$e|SV6k5QV4TBznwo#k| zj{8;finM;Zy`iziKDC#>y&Bnq@%gp!HZ(M7O)giAhE)z`gM3sC@ah@x_np)PONA6P zL8)4*>7dn>dZG)^M6N)cTj7*Jyd>*wU9A7rP`w`hP zTLK{$=q4arp`+nl0EvS=*F+Hzsn9hK4I$&nNCAJKQWq-YukHA*}H zYKV#yO`kOAX6=xa=>3-`_6j01HOF@Ql*PUR)&gq?$YTI}Qgd7?Dw#nb_jTwXKQ40SX2^!^iN)Tl`e_y?Jn zibffXePOGZYsag|?Y9Tx8^+7#xZ4v=pku07c2y}_L7)Jao<|l;OJ-2M?UV}>CEH?l zqpTj7w$d8Igl(n>6D7NcFSo|y(AudL`jRxGYiq|l8+Czn7HQV>yWW0J@6-Fhc0ksR zqw81JJ<_uu*0tkbG3fWJU8UPzxS`zlgTr^ z6WB;}DCwuFBZLO`;5%%;b2(P_* zY6fEkYJa#2!?XFr$gMWd=%-+*JPQM}9yKt^9*nmLUNWF+W?!dOU&#NOtvUw`l~ubi zR@-Oi$`&=%2x_kpGX~lF9Wh^_%-M!cStI7hXgFUXV$Opw3TFe2nAA>5+<+pcZY6pE z5;-mL+k-IANRD6A2~rb|bevpyCFE*Ux?gX9SHB!2p^v6{hZyMDaWNXVW4M>&CKdi? 
zV+@DMM!Rn1sh$@TjVDwqkVBj5_y+ebG>z(B3~hc&fk`&b>+_TwB}cx=p$!dYFsLg` zn{EVP*Hp9G-%jK5ES<(v8P*W8(mIW{fpr=S5pmKDn>q5-gP{HCGz#{GzcE_gqsE&5 z@BE0;TMVlUC(bn>}LA$->SUF{$Lq zIm^HX^Y0vxsqWXw|Jr@wk6PYW{k{9be?RQ=AeyVQI*s;8n==l~{xkQ5zr(Y&UZ$bk zPUxd)_S)O-hwWGF3xD|WzSiyi`@(-e#$Yx3!rx*0)C`tIm18NC$f$BIR<8Crs@U<1 zYtvTGsMER#Ow*Rh#1a{w(0168=}{t zC!3wV@iApkb~*qzUtZqxK4km@xFP;;s*%nhU#3CxMLoPJ4Vr_SRXPME2XLn*Zo$Yj zxjvfy{L98~FJLySsU73DV0;0M)ncu*YE0guSGs7?OE(Z3OZ`~}aLFzc%Y1SWmulS) zt3e!=ivv(vEEjvCf0NF=FvGXS2r3v`NFz2Ka(O48D0n-$UaU)!w9DiK70_X?FSEx+ zsG+$y>_KIw@hq%CO8~T-PD9)A`!JqOTR0EIqhjHln0Rf(j_4HdUDi!a{Zk0=Jkp-A zelCZu7}1XZk`ZmhU1NaOE2>*c>C`2S9x0lY_h%eMEiRh-JJ>$S4fP!%#fx z+X4_#6`L?o-&R~x3idT(H8tv^o#rQHK2!~DF-3*=U~HGt3sdMl1yZk7$SCz{ph=<7 z>nAju^V~Ps?pq@=+ObP~1<;Wg+hW~%o$n*$qq%zlI=`fU zZj0Slg{s3ccl7U`=nBCHYvJ*GM4^K5b)rBirI|6$%TPLmr4p(19azNB#>}dy)DmH% zI88OCSg`LNQGt#fZ5r9#kE*g!7*Rs(Bo!uik{*^>*zQs=zNC~)x0Z_5S5^E_TPH4J zYkXvD)ED6k!X0zx#cuJf!!_#aPMpG2m#cAsVKiZP;e>1mf?{{IEsyS+edX1$FW|B} zhHHPVjxQ+0sz{_H5N{nHh+pNkdp;lHx%Ss!V{h)qh-8lqN9=ta_r3ky&sN7iK#l_B z==y^l2ftuhgU{=UdcdDn8NAi;2H$wt6x(+JxP)9^h`l0=Mjh3|f{|y%%Qy!?>)nwr z;(m2}DfHMADXESxgdR)1fu1j*$L~{m(ArJ!=Se+2Lk{S%r|Si!-PG_hUEe5LKf4N9UzkiV;==` zc2OI}NW_K{1!Egj8~p-p^ls5cv9-{9A4~wZ8d9-B?8_iOje5$(9(i>P`ogV>E#h87 zT~aY|FArcN*M=5W1$J?2WK0Us=J{Xj-se2go50;Jo9**P4iH`ck8uAe+7>)4NUFUT zjJ+WWWcO6nBj|7<(((HgvNU z!7|vl!PgGGFoL)HY?N7`6dj+Y<;Gfyq9Q0hl|%dVa^N-3v$LNpQQ+SljE^HDsC=H` zQQrj!qjYY>xtfTGMF6EPBaCE2uT4Cl4ur~3T6$!;PVxVk|=RfA`UK0|~nNUs5sM>-Uf>hnBpC<#49eU=NR zc;7=wlm_rjSSfk-^cQ+2M12KNRfj8GR={3@d-@xRq*#$qsVGF3i+iyR-Y#oeEs^%E z^wr{7DhiS^qO)4*t5HnoEe<;_`v9^UZYkNZ$4Kq&V9r03U)>KMAg@?~r|<>%^;R?< z=@mbqW{Hwx{0>L-+(&&7Nmd1*Y!~3x|1?}<491dIVO$vOTjwL;{^v*~%v)@0N!NS7OqT!+NF7;qGU>-sC>K$Lyix}K$9m=xJ9}hj8;xYLp0G{B0XI> z$byh`N7Xr`Fy^mXCh4VQs<>75ic+zv_cc%Nx8eJh@o=Mv0qAM%_@%Yv_YY8oBcYq< zr!%Rtp3d) z>9w&7m2eR*)EgvJOY}Au38(Pkc!`9}e#3B-kUUJ}OT8vCVE@&xWh~!B<3_{!OEpz6 zb*7?n6N7fEd7J9-UKV~qV~^<{p$qS;`uIU;yUZ?o%F|a$$b;zFm*Ut%(Dl2qmXH_; 
ze3c!0%Z{aT=c(L@J)rQ6Dz_>gflRAbc=0kl=>cFFvp`whKaIV}Nb0 zYJ{<@iihcMq8qa_%AcWM5oPb2&h+z=%yj);CjABh$9(WTuHP;^pxlW!wSJFE{XPsV zr_w>cPe{P>J*1y!C6%W#_EF*^L5_K-u;aIA7Bu12!4n%Knuu*Wb1I-*nN46d8twMcFH8CQI zt9FN0A6@+!tpdJkr$*JiKSBMWO(vG$k>n%%x?Myf^w5cV_`MuwQ{1>+;25|h{5o#f z@TV1IwLaN3`k#|#Lk)|bQdYX408|U5Y4q0(Jp>(jlz0d;6Mf=6)gFne!2evbXV6`Z z!HH7-ndU^-(OPOs{}AFLHO$0??`d3*60)unpD)&l)VlInxJm6-3p)+g{N^$HVYcy_ zC}yS4PE^6bLVkseFSL)m0hJ@dQF3f|7`Ds2@_YT-CUk;HdCHO42nG?@o$e0mVF$IePVm2Ka1&15qP;Q4@k><`(vBC#@Ml^{Ck2EKC(}f!MqThYGz><2Gz;uSbjdQKI#!Jo$21&NkVNC?7H;t^cw zH8CzsJbZ%~eQhgbo4?YE8_aISc5S3nGO_oSd)^*z$H+TrR@|#X41B8`?@%C`((M0m zfr`)y5CKS_>qYa!T`=BFivXBY0|~^jUPp^y@1hlA1E(p=yKhszN}R%vaO|;um6&w>dIY_=|qz#iL?^ z7s+f?N~$CEvT^U~KWHvNB zG7x(?@o$Q8il5v+!=tX(15h&nH3MmC0tG*zW<>Lon^ub7ONrHtU-A+0L);Ln7lAlN zOk_D(a@nZ+Xwiep{ySt>ioU5!=ZR7?3&dVbOvG8g!vl$)-W~KHz7(8iK-EwhUn~su z%@9RDLUELwN!3FGu~!mLV4$ft4XDsN!~ofjcbD7o3uwilCZ%SOnwVH@Qandy@H~vh zf_aHc9b6OC{g{}Z>f=0*%;5PAJ2ujn_y8@NEBY2GisoWcw16`;C|ZzM;ozFEdp3<{ zKNe9lbXpJZQETB?&WJB<*Q?&iI<(`Lm!kbeE`oEb2B5rLEF&|zq?boV%i2{Qjs_bU z8H~NY<2R}PR#!Bhb-gCXMjH!l%v!VseP9z)e@q$+_uiJ_cTQ_Nj;o2R}xK@ zrz!Gul6rd5T&pA8aDvzBj5IYjd*$`~j&QgYSZkZNzOA{fs{_~VT^;Ss;S^*^K7JY4 z(6sdLj*>AAWisE`Tc~aAWv_ykG{lgy z>=jS%>FB}|^oOr~c%p-V2%986p*X)6!+4_GK_%&5)>qmTNz;lM$v_w65)YAF^)5jo z0Ci}YZLj7@yy#HZ6D`vUB~BF2FxHWpet=t;YtNpX;)?Po23tSQaX&xUTAl0u{b1`k z*T&qJ23uA{A&?;Gw{gXVAeI(Biy2cgE>Al~_N^alhraew^!m&u@L` zx*7A{EqSM3S7P0Ffcq1_b;nTm_e!i^4~t{T;_5?Af2hRTeu(>rCDva@xF0XE{&eVu zoU2Q$w+j34Zgt{u7 zQ1+bY25{YpZalp4M8Mxakz}nqalU2s6%%?>G0}au82bhH6uTk&SH<|<=#Ju{#7Izl zR*ZZ-ej>ZU@4l|Yy3J4JxXVwv|H@A~KI$iZpY((9IX@NkWxxBT66)2f7~~XYDx9{ejPVCf~hjob`OZd&4*@?s4BT&e}A*;%%R`{$TgIan{cdajzU_ zJuw1IA1!ph=(9d4bbscvoZam(ia>_QMo-;41}K0gvX9oY)>wBW4}O@035eUhFH+L5YJzsd9v4z&GwB~ zI#gkjvBtLzRL~$!J5_S~o(fQmHU05+4l;>7E4@Kig(b6Uv14Up*P8fDd;$riQJ=pN2K@G0(BEw6#0{20_73yUpyrqOnrAnmTOf0kPhLlp=U>Q&qF|$(D z8!*XTN~+ma#DbhkSK0DS6xP|4Rm=zBT=yh5Unq}f_&w5Esz?;b`iWO;XJU=mY+8mp zEMN8~3dL=dZsmRt?M>UUt%<*5+@{(iEj#tnX1E>Oo*0cmqHd>XS}Tx`max@I3Os@; 
zK&_F|Lvn+DoUyK{U%sGMNyH=7kP^QavV?&OiJ4rKO*)WV3YL6j_;zfnh(}8!j3aC^|syQ2N;zCiZCw5@z@`*%{ zZj=>KqOK`e=%9P5AFOA1qQ`?kv;^T!^y!CBVGeX2J7kDWe2!Z|uwxV12aR)Mq8pjS z9EI#jiH&!9-o^`j3lSD*gcFy zzY1?fwR)?|KF4}|wP*V4v@CP5*g#75cKGGpC`1~uM@*~jWlt?R#C{06EX9E>;w@L* zg}1+c1m=YpV}Sb3>e#OG=|G(J}5BS|6Ff>&||+B^aLKq zN)+P6E-aWlp89x6^@N>jr*>(B+NE7wj~&__PpI|FKD@pyy6>{~!iZnHryU>Wg9?aY z=%pLIC}%L<1j56qj3_CJnW%&6t3Xuc-@T(pujf?x`xBjb08o6M;oUmPPW-J{pH!>Et!nBuxRpf{uia|Rqe@x>5bK}YO;Dl`|;|RS2 zG79>E^@_uAMm(c8Yc>_8MNVjzl6ckiJMLd}8wl;#3wGJ1g^?{7-K0cee>mOCLcS>aWDEW6Yj*_Khxc9gRn<$R*L>=6IAs1Xy?#YCcK6$ z=L|Zu#YN~u%)Zp_!`Cqo+!dufo#UT0yREHxdPiMLcwt+|<&){IDvd<&-ItTj;Vx{Pp7jx_S!()Kr==ZQ9A+@iWeuGr{Zk7fmTD z_LdZvOfH`4FY}I{8E){}brJPoV#x&g{Vd9y^X-{dPIp1hk;8`MtwM7s05(!_#jsT* z8-I({^?Cx&%rCeh_ssml>s@E&d;12RnLmD|yCT2%n!y$Mr9DF`^J`tVy7EhbROF8b z1t^i@%=|oLos46#;`K8+2l5M6=2qkvUgN6D_x23RT{$$rutHFubs%7QDZkUTAtPU9 zD&MO7!b&AX>!0)OnxH7%klQHdk-CgExbZ1oVU$PMF8RiN`LJ9f}Y$qQ6Kbd7&#M?XdZZWO>Vb%Ms*%k`5JL8%9N_F zfT*uPvc4)vAF};2l?Pw)QFcJZUgKIxV#o$-)${Fe2&3&&HkgsW*6sSM8#c)FC}Df@ z&n>D9wZuDHU8eX|=GWvt25zX`%5?uk2^J&!NagtRiKpNd$l~jj(${NpAIhp(<$<|x zWv#Xu2j-rS2QxDKh~E1a_UTt;c@bY3N!e$HNrxHv{ccz3(5%9eJ~Bd;>3!Ho`ewhI z`zF=f2=TzOj>WZwhIdWwMZyp0)|CDz_`RD7)Qr$&;T7*J$t9vp<^z|7pKWAGE(?ap zl3e0TfawpY`g0jB;*}zCi7y7G)6<qSvPSF6w6hBV!!ueEy=-E^ zYw25@JndHaksQSJrwVt#a$o~KP7iX6n0UB#oFl_fg_k&RkHXJz;3E`1!+{^JaNQ2| z<53DfPXp>aPT`k2@Z$xZX@9?>4?F0mDxAi8Je{U+{D>(nO;h;NG!(-qgx zT%3}qWyJ;HM6o9)Ak_&ndb)I3r#KLK%eI5kp1TlPvn3ZeWlJvE<7PZ^S?&ys0GFks zd-`LS_>o#VCh2wbPk-jJoYs`jT^4?#k|nv4y+`^(mxYbXEXidZl!4NZzsIotT3h7b zzzZ^{(O-ce8-6hGF)Yf6xdQV0fRAQzQn$1-4D{a=cq%GsKJ~y4X5$l@2T)p!^XS}E zoYaPBQ?sCm4g78P43aaN3-#O_Wy~%v++3t_!u_sqB$3(Rs;W^8Ss7s zzQ=%%gPd&oUuM9sG~hoq;2#+9AqaWdOQV=NB3MBBx(u^^3eEB5#Sfo+Piu(-OgJblSpXc;IwV#mYf0cfzGs|A=M3|3SmFE|Agy$87Li4-3L+#;? 
z&Ng!2$fD2!;q$EM18j*?JHnmejs@YW7KiBun7<4yQoem%RdaTIi`5=!>Y&sSN=nGon;Y>1aUJiinV^ z?{HmZs6biNBwKqHnHMOG5k*v&SQe2Vp6mn zGWD!E+&VwfD4)Tw$s)*{b7A57y3Pn3ZBFaLrq+hg%y4H{OPKUYxl$_JNIjc7t4Pd# zHjt!SGbc;g8?WJiY5Sr|n0+3Mo63@`qKr;Lgw0uzli&<>&0*a}_Dk+p%bd{`DM*tE z9bLhcS&_Dm@HE+&XP=j<0-{abDFSD15g7bsRo(4v9g%5mt@E1ZcXiZ7n%Y{0Fhmko z^im$uq(5hKWYy2g>Mljo2_autsXu%6SK>(ZxR*950hy@&aW$ z5GO|-XjSzc;g)b~gm|Q28V!izN-g$vaDtIYb*LkZ;X*yaVv7iCqSYkLs6xE69;Fvd zniY;zar+K1l2U}(KZPjMuSE1EZR{4s`ST)f7oz^yK>*Ii}XPwY1lDV0I#U?AC=esDkCo zk`om<@(9(T$L>sy zYn_W)>#J(&5pO6qHg%xkh`~j6UBBui#-a&kU1)B2ep4$(K9TxHg>;0)h)CVxM}Yb% z4RewKdosWQ8D-lpA?38+Knj?#x1j)&YK*{&kmR$VFG}m9_Gb$ zbDI|U{p3u|h^---T_EnvYZGIlS*hUxi3+6Ort_fTVP{hoHo`cVa)v_-nmQs~bp@8@Lw`~F5~kw!^tMv4u4{J1H-p5ob&lN!^4dJ5Gp9raX9AT(ERn! zR;ax58Gf>&C%lp2(-lsqV;KHT1N{<)H!*s-|0Un$xQyYqF?w!?yAAkj3@4w_<@!+J zRNnI$P9JHaMCBsK)bxc4r))(GKZ?<}Fr4;rC^_jT80d=)^ivGVDxp2{#^t94Kf%K@!{?3Xob^>^XX&sC*qmb^LqyRFBs0-m-yix z^3!9EkHUTjB_~e5Ce-*`Txhv77|z>8Na49>_DrUn*U-(&)ds&46kSO zs~OJq{5iwJjQ$yglU&~J{>bRLp06;R>r;UJ8cJk0-X04X&fDWL4Cne!HsI44&dWs~ zxu-;OdASxaoa=wJ!l}HpO04aC9i!*vTFr2-=ba4a`aH~VuFqc>&h_~R!#kKhwEsnk z%Eje=&TuaGkRgIR!w$6nMTz)uxu-FlmzVaxC^_qK0mFGcMj6iayvcw+#_(Fm(e{5; z;Z!d?p1j5Ac^vq}fRD-p10>?Ze~8gHGW;oq zb3OmUaIVkW4Cnd`9tx&NWKXWoHyF<4&R}>G@@PHJW%y+bzm(xz&&v%s{hvaVNFT2M z%?#)I|BT^W{|6OL_M~^dwVsbLdS0$SGMwxA62rMZ?=zh1bI^ehhUB#8Q4HsD&tW*X z=Xngj4E(hI^u2CM#GmWgVZg6tIM;s-!@2$&7|!`WX2Ac%aLzx$aL(T~OsJn>ACJPR zzUXa3UEaeOJ-6F9hI2h9F`VlYU^v&Op5feoIvLL8u46c_uX`BI>+2zgb3N(*4Ws0& z$M+dti|5)7Bd~u>iR^Pc4vmjgIPu}}_5=g|O~&U0#^*u<{YFNA5~F|0Kz}&)ohgw% z-2NvsoZF$2;SI>6%QchXoKGXec|G-Ql8zJuYs-rr|9 zuXoyKr$lmjc?%8r1cvke?qmagmI1G2IJesZhI6~!z;LeT_YFAx51Evl_4^pZIsd;i zoZJ6Rg;RaeJ4CwPcQSfjFP}4<>v^DhX~C(_XohorPGLCL=WK>^xr-Uj>pjYFUhg+C zoa?#PfZxY(USGdsIM@GmhI9V!8*sP!NrTfqM>3rApTcl%p9+OjeI0{C{NxSi0Hf!2 zJD=fP&oIMzd9P$R*XKJ7=la~PaMGs?hxVTzGkUJi{S4>&Ji&0T558C<64{OG^KXW8 zeR9-$PoxihEJf>cfWnCn*XJ;XbA84!oa=K6!?`|l7|!*nQ#k2E^`)AR)x_wzJ_{Mn z_333e*XK5dbA5izaIVi|3MYN&ZD(ztrx`uh=dTRs`n=9?uFt0o=lUFa2!tV#{kcBl 
z7|!KRVmOyu!Ei43e1>zmS1_E*U7~QZ1HJpH?a<5Uxjr{Doa=Kt!?`~9Go0)5EW^1z zFDab#AwMBIQF@)xbA8@tIM*i!|1&uy%Fp#VjNx1#Kf}2`rzxEDps8dp5fe{-3;gY+{kdQ&+QE7a{tV5ZqFAP&h43CIM;Kx0UxP; zI7arooY|pR;l}~v>&&wa^z#k$Q3L(02Ko&K`ppLV*9`O@8t6x;7e1ZkJyzk)@>Uw? zFEG$Y4D{C;=+_zOA286r!05TZeZp{F?}O9}s8rqtR=)=`obwsaaPGGOg_C{eF+S%p zdhXAS4CnqFQ8=|bimN(~Ej7@uXE^u&2N=%%{~3mJJ8U)J?=hU)Vd&vd4vFl>`HWyV zx91p!b3UaE=k^RUoZD?7!@1qAGvL2rIJeIe4CnTFk>R{tZyE5Y`bjO>fyaki74B>= z8w~WnGtj@m=vyH}`@_E&-pue14EU!8JXig|k@V+$4p2DxVG-k>&*)niekj8^pOFT9 ztN}mHfcp)2sR2L3fKNBz)dqaF0l(OQ&o$sp2E5&XFErp+8Soy4b3eI`;k;gMG~lZh z?yUFkGkRX{>ln`Y|I~orYrr2g;Qa=CvjP8u0e{|rzi7b!!EkQRHw^f{7|!kRfdT(i z;Z8g2pWIV>e+6uiF{@HH`jO z4CiwH&G1@Ae>e@Yk#ceH{gKfMC;jViXnp(!{6fa(1V&$HpnrkUmoxfT4D{2F1rd_d zo);PLRs+6(;q*?OF4uz$=W*ae1AgInp^ngp)8A^q+b5*xzsKZW%Wz&_KV|p|M*l3s zV+_Blt^fN5dLKDA64|E-ho(P);g>P|G=_6KoMpg&z;NylpE8`+%UEh8 zNTd(1m$Mnp>91for@x2cmxGrs*P{w2bDYocV{x#m)}Lg zn9zaKr>`lkb>K@CezybPO$s2bci@*R`Uf1i{BD$G^*ivW;`5XP*Y7uPap1Qr`mGLJ z>$%N=pP}ftJMi0-p1U0QXA1wwf&WVJ(Vsk{Te9K^c5(9p=0l&4^=1EzsA6St^ubpJSAs&P5OUdl4sEW*nm@Pq~zqU`+36KacKLGAz~ya zyLdHjFRfa~#&Q~#l+_{ngt&lrYtxqAHKh%P;!=6k5ioVdfefp z*W&^wezSqU9v3+I|A^5qgkIWjUDObfX#ThyhsF<6IPoE$(YPK5IrY)w6er%y_;5R$ z`jIsT`a2nZ8`J0K4Cnmyc!%^^!|3&R$EoKZ6d$q!U$1+O@#pJx?=qZXi?+{HL!6(^ z@Rf|7$4|cg=>-iXr=B{lI`!230?~6l|F8L12rV=99HDU1ldo@2WjMDptszh%{rUR# zDuGJ>T!w3HXC1GcetQt}^Q#$sxdC6OaHpQ%GvL2ve0muF=NLV&$D^2^aD65yoK9yl zKJq)X(hk%|>+;I)&C2olG=^6)K9vk_P&nC>^NASnYYq6V2K>Jn9%XWMyQ9Zd4F4ZS z&-MAO!pZ*JZrc=2r|FCjeW#R?vt0Gc(FyOxq3!l6!+Cs2DBM}^2NMG%Cw`p5>BRZ? 
z89n#s8H}E{$2%DPa`4i6KF;WQyuHIs+Q{-=i?gP`pW$4erw#Z^2K=hQg1xZk3dVmO z!}8a19b`x6a8H{veDnc==5`<{}IDEe~xqdyBU3v;j;dhSV&>lh!R*Lv&ka%leg`^voz z`csJs(iVmj{~Cc>mR^t2`sn$LUXLPNw-L&szp+aA2m(|(fdC{r(V@SWJcZ$;|Jh29 zT85K8`v0$JVmQ&yQuNChu6+rQtr)|J{$fS{XNDh6gevKEWqN#s260}i#sh@Yp}*Jr zV}=v|yA}Tr7*6`@|L@{+h7-MBM=PX&fkeF1;zLpAqLxVAT&oD5my_p4ebo_et}DWK zaqwl(27Fc2Dw^BbX$hc6eljWR$20NGN|lYM{(t{=Y`V6o@9~JQz8N3Vrvi&Q+Q=$o zH-8bnfto5dzIxeodANCzRW!dIA5iPA54T4`uq{3|#HfUEk{_KK`1hEyXtrP9b*kir zfHID6Iepo83R7j4CfJ|c_}XjAV*AFxk@G*~Vtf0lTzc{TQ%ZgvUy5TT>5SCjlT+=g z2I!NHp}LNaI=H%O4PWvBEy+f-8Pq&7AU+sBsm1xFTBVZfkYQ#V2skBIz zuD&y({WhtC_-<*@mwl@jUt(^d4_8`6_}|yVMfgAXCc=*RSgw>GZVb(%|Ar9ZqN5GA z{aVsW3^s;m;Z&|LJ+?Rl_vhnK-9z$h%5SUux(}iJGxi~WgUYY_Zpu$Q2P(f?<=5kB z%D-eE@<+4e@7ss`tFq)LY@qtDQTe(3f4C3%*JY9aqkYJ~K8t)B&kt08Jr3per}6hd z^KVk}d4Ae=8fgA4S@K`G5BXorl0TFszZVz{wfNW^x7IHDgEXvIp7q}XeWA`r;!I^K zSNHlq;z^w{zkU?g7+=Wr#{XAZd10*M3XZb0nWXoTm^7H?9kN7$B&jcpy=a65xmZ&YORw+RCqxpf;er^cQCcj@z zV17nSSaQmz%-Q63|43%)S9wYPp*WoKX@4i1{Prm_pDLfLlcLV_KYDLN=R^=r9kHX>9 z|6zmtT}p73%CApO|GVEHf2)$O`$3XFDvSI*O1`uHr<^7w>iDbWgIpwiruBZhcIFrV zVKZ=Aui{CUU+1U!GF?04$1){f|35>ThqL@wB7Zjf*Zf50Xvg@55|w`}4lQ5D&xZ{1 z+ow}5#1E~gyv>L|zc$G4|Aow8t9&GX91f@a5ZXt!@^7KR5|aM^dE{ND|6PRq*~*`{ zUgqdm8A<-JIGpk)AhVFK==7=lEO|wFcyij`hx|f5C&KkpNXfrpUR($dgQ5H$)}jfle7HoD!<;J*0d{B zekZ+_ujww=`a9%bp!6?Rgrt8F4xL}?zuKVxY9*i6e0b9NHMYT^|J^FTmZRyN`F~;1 ze}&TDRvAfu>e6+7&G&hO{C*|BR0~#TonOOS4Dz=q`L&9WklaL=Ku*w|;=vU>}b|(9m;m~q5-k{`bKi7O~mHZPmpw3iRl=Qhp<(Fvc z?+q${3XxYjpT@6N`5o$8t5kmae-?St`fF^7F24rUd8;b_TE&>kPhFbMugg#RIAs%y z{&VTCWo`Ayb2&~lR(9s6HGXI81Z1ra(O=8DL*KM*si- diff --git a/UserTools/Factory/Factory.cpp b/UserTools/Factory/Factory.cpp index 4cf7d00..45fdef8 100644 --- a/UserTools/Factory/Factory.cpp +++ b/UserTools/Factory/Factory.cpp @@ -5,17 +5,17 @@ Tool* ret=0; // if (tool=="Type") tool=new Type; if (tool=="DummyTool") ret=new DummyTool; -if (tool=="MulticastReceiver") ret=new MulticastReceiver; +if (tool=="MulticastReceiverSender") ret=new MulticastReceiverSender; 
if (tool=="MulticastWorkers") ret=new MulticastWorkers; if (tool=="DatabaseWorkers") ret=new DatabaseWorkers; -if (tool=="QueueTrimmer") ret=new QueueTrimmer; if (tool=="WriteQueryReceiver") ret=new WriteQueryReceiver; if (tool=="ReadQueryReceiverReplySender") ret=new ReadQueryReceiverReplySender; if (tool=="WriteWorkers") ret=new WriteWorkers; -if (tool=="MiddlemanNegotiate") ret=new MiddlemanNegotiate; if (tool=="Monitoring") ret=new Monitoring; if (tool=="SocketManager") ret=new SocketManager; if (tool=="ResultWorkers") ret=new ResultWorkers; if (tool=="JobManager") ret=new JobManager; +//if (tool=="QueueTrimmer") ret=new QueueTrimmer; +//if (tool=="MiddlemanNegotiate") ret=new MiddlemanNegotiate; return ret; } diff --git a/UserTools/JobManager/JobManager.cpp b/UserTools/JobManager/JobManager.cpp index d326b7a..df81bd8 100644 --- a/UserTools/JobManager/JobManager.cpp +++ b/UserTools/JobManager/JobManager.cpp @@ -16,7 +16,6 @@ bool JobManager::Initialise(std::string configfile, DataModel &data){ m_data->num_threads=0; // tracker worker_pool_manager= new WorkerPoolManager(m_data->job_queue, &m_thread_cap, &(m_data->thread_cap), &(m_data->num_threads), nullptr, self_serving); - // FIXME add to other Tools ExportConfiguration(); return true; @@ -65,7 +64,7 @@ void JobManager::LoadConfig(){ if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; if(!m_variables.Get("thread_cap",m_thread_cap)) m_thread_cap = double(std::thread::hardware_concurrency())*0.8; if(!m_variables.Get("global_thread_cap",m_data->thread_cap)) m_data->thread_cap = m_thread_cap; - if(!m_variables.Get("self_serving", self_serving) self_serving = true; + if(!m_variables.Get("self_serving", self_serving)) self_serving = true; return; } diff --git a/UserTools/Monitoring/Monitoring.cpp b/UserTools/Monitoring/Monitoring.cpp index f04d536..c30320e 100644 --- a/UserTools/Monitoring/Monitoring.cpp +++ b/UserTools/Monitoring/Monitoring.cpp @@ -5,20 +5,26 @@ Monitoring::Monitoring():Tool(){} bool 
Monitoring::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; // how often to write out monitoring stats - int monitoring_ms = 60000; - m_variables.Get("monitoring_period_ms",monitoring_ms); + int monitoring_period_ms = 60000; + m_variables.Get("monitoring_period_ms",monitoring_period_ms); + + ExportConfiguration(); - thread_args.monitoring_period_ms = std::chrono::milliseconds{monitoring_ms}; - thread_args.last_send = std::chrononow(); + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + + thread_args.monitoring_period_ms = std::chrono::milliseconds{monitoring_period_ms}; + thread_args.last_send = std::chrono::steady_clock::now(); + thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; m_data->utils.CreateThread("monitoring", &Thread, &thread_args); // thread needs a unique name m_data->num_threads++; @@ -33,9 +39,9 @@ bool Monitoring::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. 
- ++(m_data->monitoring_thread_crashes); + ++(monitoring_vars.thread_crashes); } return true; @@ -44,15 +50,38 @@ bool Monitoring::Execute(){ bool Monitoring::Finalise(){ + // signal job distributor thread to stop + Log(m_tool_name+": Joining monitoring thread",v_warning); + m_data->utils.KillThread(&thread_args); + m_data->num_threads--; + + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» -bool Monitoring::Thread(){ +void Monitoring::Thread(Thread_args* args){ - if((m_args->last_send - std::chrononow()) > monitoring_period_ms){ + Monitoring_args* m_args = dynamic_cast(args); + if((m_args->last_send - std::chrono::steady_clock::now()) > m_args->monitoring_period_ms){ + + std::unique_lock locker(m_args->m_data->monitoring_variables_mtx); + + for(std::pair& mon : m_args->m_data->monitoring_variables){ + + std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->toJSON()+"}"; + + std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); + m_args->m_data->out_mon_msg_queue.push_back(s); + + } + + /* // to calculate rates we need to know the difference in number // of reads/writes since last time. So get the last values unsigned long last_write_query_count; @@ -144,18 +173,14 @@ bool Monitoring::Thread(){ <<"]"; SC_vars["Status"]->SetValue(status.str()); - /* - // temporarily bypass the database logging level to ensure it gets sent to the monitoring db. - int db_verbosity_tmp = db_verbosity; - db_verbosity = 10; - Log(Concat("Monitoring Stats:",json_stats),15); - db_verbosity = db_verbosity_tmp; - */ +// // temporarily bypass the database logging level to ensure it gets sent to the monitoring db. 
+// int db_verbosity_tmp = db_verbosity; +// db_verbosity = 10; +// Log(Concat("Monitoring Stats:",json_stats),15); +// db_verbosity = db_verbosity_tmp; - /* - std::string sql_qry = "INSERT INTO monitoring ( time, device, subject, data ) VALUES ( 'now()', '" - + my_id+"','stats','"+json_stats+"' );"; - */ + //std::string sql_qry = "INSERT INTO monitoring ( time, device, subject, data ) VALUES ( 'now()', '" + // + my_id+"','stats','"+json_stats+"' );"; std::string multicast_msg = "{ \"topic\":\"monitoring\"" ", \"subject\":\"stats\"" @@ -172,20 +197,22 @@ bool Monitoring::Thread(){ } // reset counters - last_send = std::chrononow(); + last_send = std::chrono::steady_clocknow(); min_loop_ms=9999999; max_loop_ms=0; loops=0; + */ } - return true; + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» -bool ReceiveSQL::ResetStats(bool reset){ +bool Monitoring::ResetStats(bool reset){ +/* if(!reset) return true; min_loop_ms=0; @@ -236,6 +263,7 @@ bool ReceiveSQL::ResetStats(bool reset){ std::string timestring; TimeStringFromUnixSec(0, timestring); SC_vars["ResetStats"]->SetValue(false); +*/ return true; } diff --git a/UserTools/Monitoring/Monitoring.h b/UserTools/Monitoring/Monitoring.h index 195e6cc..62194c0 100644 --- a/UserTools/Monitoring/Monitoring.h +++ b/UserTools/Monitoring/Monitoring.h @@ -7,7 +7,8 @@ #include #include "Tool.h" - +#include "DataModel.h" +#include "MonitoringMonitoring.h" /** * \class Monitoring @@ -19,14 +20,15 @@ * Contact: marcus.o-flaherty@warwick.ac.uk */ -struct PubReceiver_args : public Thread_args { +struct Monitoring_args : public Thread_args { DataModel* m_data; + MonitoringMonitoring* monitoring_vars; std::chrono::time_point last_send; std::chrono::milliseconds monitoring_period_ms; std::stringstream ss; -} +}; class Monitoring: public Tool { public: @@ -36,7 +38,11 @@ class Monitoring: public Tool { bool Finalise(); ///< Finalise function used to clean up resources. 
private: - Thread_args thread_args; + static void Thread(Thread_args* args); + Monitoring_args thread_args; + MonitoringMonitoring monitoring_vars; + + static bool ResetStats(bool reset); }; diff --git a/UserTools/Monitoring/MonitoringMonitoring.h b/UserTools/Monitoring/MonitoringMonitoring.h new file mode 100644 index 0000000..b820a13 --- /dev/null +++ b/UserTools/Monitoring/MonitoringMonitoring.h @@ -0,0 +1,23 @@ +#ifndef MonitoringMonitoring_H +#define MonitoringMonitoring_H + +#include "MonitoringVariables.h" + +class MonitoringMonitoring : public MonitoringVariables { + public: + MonitoringMonitoring(){}; + ~MonitoringMonitoring(){}; + + // TODO add more monitoring + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/MulticastReceiverSender/MulticastReceiveMonitoring.h b/UserTools/MulticastReceiverSender/MulticastReceiveMonitoring.h new file mode 100644 index 0000000..7b1e0bc --- /dev/null +++ b/UserTools/MulticastReceiverSender/MulticastReceiveMonitoring.h @@ -0,0 +1,36 @@ +#ifndef MulticastReceiveMonitoring_H +#define MulticastReceiveMonitoring_H + +#include "MonitoringVariables.h" + +class MulticastReceiveMonitoring : public MonitoringVariables { + public: + MulticastReceiveMonitoring(){}; + ~MulticastReceiveMonitoring(){}; + + std::atomic polls_failed; // error polling socket + std::atomic rcv_fails; // error in recv_from + std::atomic send_fails; // error in send_to + std::atomic msgs_rcvd; // messages successfully received + std::atomic msgs_sent; // messages successfully received + std::atomic in_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic out_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic thread_crashes; // restarts of tool worker thread 
(main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"polls_failed\":"+std::to_string(polls_failed.load()) + +",\"rcv_fails\":"+std::to_string(rcv_fails.load()) + +",\"send_fails\":"+std::to_string(send_fails.load()) + +",\"msgs_rcvd\":"+std::to_string(msgs_rcvd.load()) + +",\"msgs_sent\":"+std::to_string(msgs_sent.load()) + +",\"in_buffer_transfers\":"+std::to_string(in_buffer_transfers.load()) + +",\"out_buffer_transfers\":"+std::to_string(out_buffer_transfers.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index 35cf6f5..8345ad0 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -1,27 +1,27 @@ #include "MulticastReceiverSender.h" +#include + MulticastReceiverSender::MulticastReceiverSender():Tool(){} bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - /* ----------------------------------------- */ /* Configuration */ /* ----------------------------------------- */ m_verbose=1; - std::string type_str; // "logging" or "monitoring" int port = 5000; // shared with service discovery, logging and monitoring std::string multicast_address; // separate for each // FIXME slow controls to vary them int local_buffer_size = 100; - int transfer_ms = 1000; + int transfer_period_ms = 1000; int poll_timeout_ms = 100; m_variables.Get("type",type_str); @@ -37,16 +37,18 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data } // buffer received messages in a local vector 
until size exceeds local_buffer_size... m_variables.Get("local_buffer_size",local_buffer_size); - // ... or time since last transfer exceeds transfer_ms - m_variables.Get("transfer_ms",transfer_ms); + // ... or time since last transfer exceeds transfer_period_ms + m_variables.Get("transfer_period_ms",transfer_period_ms); m_variables.Get("poll_timeout_ms",poll_timeout_ms); + ExportConfiguration(); + /* ----------------------------------------- */ /* Socket Setup */ /* ----------------------------------------- */ - int socket = socket(AF_INET, SOCK_DGRAM, 0); - if(socket<=0){ + socket_handle = socket(AF_INET, SOCK_DGRAM, 0); + if(socket_handle<=0){ Log(m_tool_name+": Failed to open multicast socket with error "+strerror(errno),v_error); return false; } @@ -55,7 +57,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data struct linger l; l.l_onoff = 0; // whether to linger l.l_linger = 0; // seconds to linger for - get_ok = setsockopt(socket, SOL_SOCKET, SO_LINGER, (char*) &l, sizeof(l)); + get_ok = setsockopt(socket_handle, SOL_SOCKET, SO_LINGER, (char*) &l, sizeof(l)); if(get_ok!=0){ Log(m_tool_name+": Failed to set multicast socket linger with error "+strerror(errno),v_error); return false; @@ -65,14 +67,14 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data // this is intended to prevent delivery of delayed packets to the wrong application, // but means a new middleman instance won't be able to bind for 30-120 seconds after another closes. 
int a =1; - get_ok = setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, &a, sizeof(a)); + get_ok = setsockopt(socket_handle, SOL_SOCKET, SO_REUSEADDR, &a, sizeof(a)); if(get_ok!=0){ Log(m_tool_name+": Failed to set multicast socket reuseaddr with error "+strerror(errno),v_error); return false; } // set the socket to non-blocking mode - should be irrelevant as we poll - get_ok = fcntl(socket, F_SETFL, O_NONBLOCK); + get_ok = fcntl(socket_handle, F_SETFL, O_NONBLOCK); if(get_ok!=0){ Log(m_tool_name+": Failed to set multicast socket to non-blocking with error "+strerror(errno),v_warning); } @@ -103,7 +105,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data */ // to listen we need to bind to the socket - get_ok = (bind(socket, (struct sockaddr*)&addr, addrlen) == 0); + get_ok = (bind(socket_handle, (struct sockaddr*)&addr, addrlen) == 0); if(!get_ok) { Log(m_tool_name+": Failed to bind to multicast listen socket",v_error); return false; @@ -117,7 +119,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data Log(m_tool_name+": Bad multicast group '"+multicast_address+"'",v_error); return false; } - get_ok = setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); + get_ok = setsockopt(socket_handle, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); if(get_ok!=0){ Log(m_tool_name+": Failed to join multicast group",v_error); return false; @@ -127,49 +129,34 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data /* Thread Setup */ /* ----------------------------------------- */ + // monitoring struct to encapsulate tracking info + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; - thread_args.socket = socket; + thread_args.m_tool_name = m_tool_name; + thread_args.monitoring_vars = &monitoring_vars; + thread_args.socket = socket_handle; thread_args.addr = addr; 
thread_args.addrlen = addrlen; - thread_args.poll = zmq::pollitem_t{NULL, socket, ZMQ_POLLIN, 0}; + thread_args.poll = zmq::pollitem_t{NULL, socket_handle, ZMQ_POLLIN, 0}; thread_args.poll_timeout_ms = poll_timeout_ms; thread_args.local_buffer_size = local_buffer_size; thread_args.in_local_queue = m_data->multicast_buffer_pool.GetNew(local_buffer_size); - thread_args.last_transfer = std::chrononow(); - thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_ms}; + thread_args.last_transfer = std::chrono::steady_clock::now(); + thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; thread_args.in_queue = &m_data->in_multicast_msg_queue; thread_args.in_queue_mtx = &m_data->in_multicast_msg_queue_mtx; if(type_str=="logging"){ - // TODO encapsulate these in a socket-receive monitoring struct, w/ method for turning to json - // can be shared across multicast and both zmq socket receivers - { - thread_args.polls_failed = &m_data->log_polls_failed; - thread_args.msgs_rcvd = &m_data->logs_recvd; - thread_args.rcv_fails = &m_data->log_recv_fails; - thread_args.in_buffer_transfers = &m_data->log_in_buffer_transfers; - thread_args.out_buffer_transfers = &m_data->log_out_buffer_transfers; - } - thread_args.out_queue = &m_data->out_log_msg_queue; thread_args.out_queue_mtx = &m_data->out_log_msg_queue_mtx; - - thread_crashes = &m_data->log_thread_crashes; } else { - { - thread_args.polls_failed = &m_data->mon_polls_failed; - thread_args.msgs_rcvd = &m_data->mons_recvd; - thread_args.rcv_fails = &m_data->mon_recv_fails; - thread_args.in_buffer_transfers = &m_data->mon_in_buffer_transfers; - thread_args.out_buffer_transfers = &m_data->mon_out_buffer_transfers; - } - thread_args.out_queue = &m_data->out_mon_msg_queue; thread_args.out_queue_mtx = &m_data->out_mon_msg_queue_mtx; - - thread_crashes = &m_data->mon_thread_crashes; } - type_str+="_sendreceiver"; // thread needs a unique name - m_data->utils.CreateThread(type_str, &Thread, 
&thread_args); + + // thread needs a unique name + m_data->utils.CreateThread(type_str+"_sendreceiver", &Thread, &thread_args); m_data->num_threads++; return true; @@ -181,9 +168,9 @@ bool MulticastReceiverSender::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. - ++(*thread_crashes); + ++(monitoring_vars.thread_crashes); } return true; @@ -195,31 +182,36 @@ bool MulticastReceiverSender::Finalise(){ // signal background receiver thread to stop Log(m_tool_name+": Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); m_data->num_threads--; std::unique_lock locker(m_data->in_multicast_msg_queue_mtx); - m_data->in_multicast_msg_queue->clear(); + m_data->in_multicast_msg_queue.clear(); locker.unlock(); if(type_str=="logging"){ locker = std::unique_lock(m_data->out_log_msg_queue_mtx); - m_data->out_log_msg_queue->clear(); + m_data->out_log_msg_queue.clear(); } else { locker = std::unique_lock(m_data->out_mon_msg_queue_mtx); - m_data->out_mon_msg_queue->clear(); + m_data->out_mon_msg_queue.clear(); } - get_ok = close(socket); - if(get_ok!=0){ - Log(m_tool_name+": Error closing socket "+strerror(errno),v_error); - return false; + if(socket_handle>0){ + get_ok = close(socket_handle); + if(get_ok!=0){ + Log(m_tool_name+": Error closing socket "+strerror(errno),v_error); + return false; + } } + locker = std::unique_lock(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } -void ReceiveSQL::Thread(Thread_args* arg){ +void 
MulticastReceiverSender::Thread(Thread_args* arg){ MulticastReceive_args* m_args=reinterpret_cast(arg); DataModel* m_data = m_args->m_data; @@ -228,65 +220,66 @@ void ReceiveSQL::Thread(Thread_args* arg){ // ===================== if(!m_args->in_local_queue->empty() && ((m_args->in_local_queue->size()>m_args->local_buffer_size) || - (m_args->last_transfer - std::chrononow()) > transfer_period_ms) ){ + (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms) ){ - std::unique_lock locker(m_args->in_queue_mtx); + std::clog<m_tool_name<<": adding "<in_local_queue->size() + <<" messages to datamodel"< locker(*m_args->in_queue_mtx); m_args->in_queue->push_back(m_args->in_local_queue); locker.unlock(); - m_args->in_local_queue = m_data->multicast_buffer_pool.GetNew(local_buffer_size); + m_args->in_local_queue = m_data->multicast_buffer_pool.GetNew(m_args->local_buffer_size); - m_args->Log(m_tool_name+": added "+std::to_string(m_args->in_local_queue.size()) - +" messages to datamodel",5); // FIXME streamline - m_args->last_transfer = std::chrononow(); - ++(*m_args->in_buffer_transfers); + m_args->last_transfer = std::chrono::steady_clock::now(); + ++(m_args->monitoring_vars->in_buffer_transfers); } // poll // ==== try { - get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); + m_args->get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); } catch(zmq::error_t& err){ // ignore poll aborting due to signals if(zmq_errno()==EINTR) return; - std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? - ++(*m_args->polls_failed); return; } catch(...){ - std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
- ++(*m_args->polls_failed); return; } - if(get_ok<0){ - std::cerr<get_ok<0){ + std::cerr<m_tool_name<<" poll failed with "<monitoring_vars->polls_failed); m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? - ++(*m_args->polls_failed); return; } // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - m_data->Log(m_tool_name+": reading multicast message",10); // FIXME streamline + std::clog<m_tool_name<<": receiving message"<get_ok = recvfrom(m_args->socket, m_args->message, 655355, 0, &m_args->addr, &m_args->addrlen); + m_args->get_ok = recvfrom(m_args->socket, m_args->message, 655355, 0, (struct sockaddr*)&m_args->addr, &m_args->addrlen); if(m_args->get_ok <= 0){ - ++(*m_args->rcv_fails); + ++(m_args->monitoring_vars->rcv_fails); // FIXME better logging - std::cerr<addr->sin_addr)} // FIXME is this valid on failure? + std::cerr<m_tool_name<<": Failed to receive message from " + <addr.sin_addr) // FIXME is this valid on failure? <<" with error "<msgs_rcvd); - m_data->Log(m_tool_name+": Received multicast message '"+std::string(m_args->message) - +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); // FIXME streamline + ++(m_args->monitoring_vars->msgs_rcvd); + //m_data->Log(m_tool_name+": Received multicast message '"+std::string(m_args->message) + // +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); // FIXME streamline m_args->in_local_queue->emplace_back(m_args->message); @@ -295,33 +288,34 @@ void ReceiveSQL::Thread(Thread_args* arg){ // write // ===== - if(!m_args->out_local_queue.empty()){ + if(m_args->out_i < m_args->out_local_queue.size()){ + + std::clog<m_tool_name<<": sending message"<out_local_queue[m_args->out_i++]; // always increment, even if error // send it - int cnt = sendto(m_args->socket, message.c_str(), message.length()+1, 0, &m_args->addr, m_args->addrlen); + int cnt = sendto(m_args->socket, message.c_str(), message.length()+1, 0, (struct sockaddr*)&m_args->addr, 
m_args->addrlen); // check success if(cnt < 0){ - m_data->Log(m_tool_name+": Error sending multicast message: "+strerror(errno),v_error); // FIXME ensure this isn't circular - m_args->out_local_queue.pop_front(); // FIXME discard it anyway? or maybe don't until it succeeds? - ++(*m_args->send_fails); - + //m_data->Log(m_tool_name+": Error sending multicast message: "+strerror(errno),v_error); // FIXME ensure this isn't circular + ++(m_args->monitoring_vars->send_fails); } else { - m_args->out_local_queue.pop_front(); - ++(*m_args->msgs_sent); - + ++(m_args->monitoring_vars->msgs_sent); } } else { + // else see if there are any in datamodel to grab - std::unique_lock locker(m_args->out_queue_mtx); + std::unique_lock locker(*m_args->out_queue_mtx); if(!m_args->out_queue->empty()){ - std::swap(m_args->out_queue, m_args->out_local_queue); - ++(*m_args->out_buffer_transfers); + std::clog<m_tool_name<<": receiving fetching new outgoing logging messages"<out_queue, m_args->out_local_queue); + ++(m_args->monitoring_vars->out_buffer_transfers); + m_args->out_i=0; } locker.unlock(); diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.h b/UserTools/MulticastReceiverSender/MulticastReceiverSender.h index b575371..0590dac 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.h +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.h @@ -13,6 +13,7 @@ #include "Tool.h" #include "DataModel.h" +#include "MulticastReceiveMonitoring.h" /** * \class MulticastReceiverSender @@ -27,7 +28,9 @@ // class for things passed to multicast listener thread struct MulticastReceive_args : public Thread_args { + std::string m_tool_name; DataModel* m_data; + MulticastReceiveMonitoring* monitoring_vars; socklen_t addrlen; struct sockaddr_in addr; int socket; @@ -36,13 +39,14 @@ struct MulticastReceive_args : public Thread_args { char message[655355]; // theoretical maximum UDP buffer size - size also hard-coded in thread int get_ok; size_t 
local_buffer_size; - std::vector in_local_queue; + std::vector* in_local_queue; std::vector out_local_queue; + size_t out_i=0; - std::vector* in_queue; - std::mutex in_queue_mtx; - std::deque* out_queue; - std::mutex out_queue_mtx; + std::vector*>* in_queue; + std::mutex* in_queue_mtx; + std::vector* out_queue; + std::mutex* out_queue_mtx; std::chrono::time_point last_transfer; std::chrono::milliseconds transfer_period_ms; @@ -60,8 +64,12 @@ class MulticastReceiverSender: public Tool { private: static void Thread(Thread_args* args); MulticastReceive_args thread_args; + MulticastReceiveMonitoring monitoring_vars; - int get_ok; /// FIXME check usage + std::string type_str; // "logging" or "monitoring" + int socket_handle; + int get_ok; + std::atomic* thread_crashes; }; diff --git a/UserTools/MulticastWorkers/MulticastWorkerMonitoring.h b/UserTools/MulticastWorkers/MulticastWorkerMonitoring.h new file mode 100644 index 0000000..e0ff75a --- /dev/null +++ b/UserTools/MulticastWorkers/MulticastWorkerMonitoring.h @@ -0,0 +1,28 @@ +#ifndef MulticastWorkerMonitoring_H +#define MulticastWorkerMonitoring_H + +#include "MonitoringVariables.h" + +class MulticastWorkerMonitoring : public MonitoringVariables { + public: + MulticastWorkerMonitoring(){}; + ~MulticastWorkerMonitoring(){}; + + std::atomic jobs_failed; + std::atomic jobs_completed; + std::atomic msgs_processed; // each job concatenates a batch of messages; this sums all batches + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"jobs_failed\":"+std::to_string(jobs_failed.load()) + +",\"jobs_completed\":"+std::to_string(jobs_completed.load()) + +",\"msgs_processed\":"+std::to_string(msgs_processed.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp 
b/UserTools/MulticastWorkers/MulticastWorkers.cpp index 97ca46a..ff369e5 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.cpp +++ b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -5,23 +5,29 @@ MulticastWorkers::MulticastWorkers():Tool(){} bool MulticastWorkers::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - - // allocate ehhh 60% of the CPU to multicast workers - int max_workers= (double(std::thread::hardware_concurrency())*0.6); +// // allocate ehhh 60% of the CPU to multicast workers +// int max_workers= (double(std::thread::hardware_concurrency())*0.6); if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; // m_variables.Get("max_workers",max_workers); + ExportConfiguration(); + // potentially we will have a dedicated worker pool for multicast, but for now, // just one created and managed by JobManager Tool //job_manager = new WorkerPoolManager(multicast_jobs, &max_workers, 0, 0, 0, true, true); + // monitoring struct to encapsulate tracking info + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; m_data->utils.CreateThread("multicast_job_distributor", &Thread, &thread_args); // thread needs a unique name m_data->num_threads++; @@ -35,8 +41,8 @@ bool MulticastWorkers::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? - ++m_data->multicast_job_distributor_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
+ ++(monitoring_vars.thread_crashes); } return true; @@ -47,19 +53,22 @@ bool MulticastWorkers::Finalise(){ // signal job distributor thread to stop Log(m_tool_name+": Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); m_data->num_threads--; // this will invoke kill on the WorkerPoolManager thread creating worker threads, as well as all workers. //delete job_manager; + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } void MulticastWorkers::Thread(Thread_args* args){ - MulticastJobDistributor_args* m_args = reinterpret_cast(args); + MulticastJobDistributor_args* m_args = dynamic_cast(args); m_args->local_msg_queue.clear(); // grab any batches of logging/monitoring messages @@ -73,13 +82,14 @@ void MulticastWorkers::Thread(Thread_args* args){ for(int i=0; ilocal_msg_queue.size(); ++i){ // add a new Job to the job queue to process this data - Job* the_job = m_args->job_pool.GetNew(&m_args->m_data->job_pool, "multicast_worker"); + Job* the_job = m_args->m_data->job_pool.GetNew("multicast_worker"); + the_job->out_pool = &m_args->m_data->job_pool; if(the_job->data == nullptr){ // on first creation of the job, make it a JobStruct to encapsulate its data // N.B. 
Pool::GetNew will only invoke the constructor if this is a new instance, // (not if it's been used before and then returned to the pool) // so don't pass job-specific variables to the constructor - the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data, m_args->monitoring_vars); } else { // this should never happen as jobs should return their args to the pool std::cerr<<"Multicast Job with non-null data pointer!"<(the_job->data); + MulticastJobStruct* job_data = static_cast(the_job->data); job_data->msg_buffer = m_args->local_msg_queue[i]; + job_data->monitoring_vars = m_args->monitoring_vars; + job_data->m_job_name = "multicast_worker"; the_job->func = MulticastMessageJob; the_job->fail_func = MulticastMessageFail; //multicast_jobs.AddJob(the_job); - m_data->job_queue.AddJob(the_job); + m_args->m_data->job_queue.AddJob(the_job); } - return true; + return; } @@ -109,18 +121,20 @@ void MulticastWorkers::MulticastMessageFail(void*& arg){ // safety check in case the job somehow fails after returning its args to the pool if(arg==nullptr){ + std::cerr<<"multicast worker fail with no args"<(arg); - ++(*m_args->m_data->multicast_worker_job_fails); + MulticastJobStruct* m_args=static_cast(arg); + std::cerr<m_job_name<<" failure"<monitoring_vars->jobs_failed); // return the vector of string buffers to the pool for re-use by MulticastReceiverSender Tool m_args->msg_buffer->clear(); m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); // return our job args to the pool - m_args->m_pool.Add(m_args); + m_args->m_pool->Add(m_args); m_args = nullptr; // clear the local m_args variable... not strictly necessary arg = nullptr; // clear the job 'data' member variable @@ -133,15 +147,16 @@ void MulticastWorkers::MulticastMessageFail(void*& arg){ // 2. submit the data we already have // 3. 
make a new job for the remaining data + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» // Each job takes a vector of messages and converts them into a suitable object, // then locks and inserts that into a datamodel vector for the database workers -void MulticastWorkers::MulticastMessageJob(void*& arg){ +bool MulticastWorkers::MulticastMessageJob(void*& arg){ - MulticastJobStruct* m_args=reinterpret_cast(arg); + MulticastJobStruct* m_args=static_cast(arg); // most efficient way to do insertion would seem to be via jsonb_to_recordset, which allows batching queries, // query optimisation similar to 'unnest', and avoids the overhead of parsing the JSON: e.g. @@ -177,64 +192,65 @@ void MulticastWorkers::MulticastMessageJob(void*& arg){ switch(query_topic{next_msg[10]}){ case query_topic::logging: - m_args->out_buffer = m_args->logging_buffer; + m_args->out_buffer = &m_args->logging_buffer; break; case query_topic::monitoring: - m_args->out_buffer = m_args->monitoring_buffer; + m_args->out_buffer = &m_args->monitoring_buffer; break; case query_topic::rootplot: - m_args->out_buffer = m_args->rootplot_buffer; + m_args->out_buffer = &m_args->rootplot_buffer; break; case query_topic::plotlyplot: - m_args->out_buffer = m_args->plotlyplot_buffer; + m_args->out_buffer = &m_args->plotlyplot_buffer; break; default: continue; // FIXME unknown topic: error log it. 
} - if(m_args->out_buffer.length()>1) m_args->out_buffer += ", "; - m_args->out_buffer += next_msg; + if(m_args->out_buffer->length()>1) (*m_args->out_buffer) += ", "; + (*m_args->out_buffer) += next_msg; - ++(*m_args->m_data->n_multicasts_processed); // FIXME add split by topic + ++(m_args->monitoring_vars->msgs_processed); } // pass into datamodel for DatabaseWorkers if(m_args->logging_buffer.length()!=1){ m_args->logging_buffer += "]"; - std::unique_lock locker(m_args->log_query_queue_mtx); + std::unique_lock locker(m_args->m_data->log_query_queue_mtx); m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); } if(m_args->monitoring_buffer.length()!=1){ m_args->monitoring_buffer += "]"; - std::unique_lock locker(m_args->mon_query_queue_mtx); + std::unique_lock locker(m_args->m_data->mon_query_queue_mtx); m_args->m_data->mon_query_queue.push_back(m_args->monitoring_buffer); } if(m_args->rootplot_buffer.length()!=1){ m_args->rootplot_buffer += "]"; - std::unique_lock locker(m_args->rootplot_query_queue_mtx); + std::unique_lock locker(m_args->m_data->rootplot_query_queue_mtx); m_args->m_data->rootplot_query_queue.push_back(m_args->rootplot_buffer); } if(m_args->plotlyplot_buffer.length()!=1){ m_args->plotlyplot_buffer += "]"; - std::unique_lock locker(m_args->plotlyplot_query_queue_mtx); - m_args->m_data->plotlyplot_query_queue_mtx.push_back(m_args->plotlyplot_buffer); + std::unique_lock locker(m_args->m_data->plotlyplot_query_queue_mtx); + m_args->m_data->plotlyplot_query_queue.push_back(m_args->plotlyplot_buffer); } // return the vector of string buffers to the pool for re-use by MulticastReceiverSender Tool m_args->msg_buffer->clear(); m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); - ++(*m_args->m_data->multicast_worker_job_successes); + std::cerr<m_job_name<<" completed"<monitoring_vars->jobs_completed); - m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args->m_pool->Add(m_args); // return our job 
args to the job args struct pool m_args = nullptr; // clear the local m_args variable... not strictly necessary arg = nullptr; // clear the job 'data' member variable - return; + return true; } @@ -328,7 +344,7 @@ void MulticastWorkers::MulticastMessageJob(void* arg){ //================== - MulticastJobStruct* m_args=reinterpret_cast(arg); + MulticastJobStruct* m_args=static_cast(arg); // v0: pre-populate query with base m_args->out_buffer = m_args->query_base; diff --git a/UserTools/MulticastWorkers/MulticastWorkers.h b/UserTools/MulticastWorkers/MulticastWorkers.h index d6506c0..fa3a738 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.h +++ b/UserTools/MulticastWorkers/MulticastWorkers.h @@ -5,7 +5,7 @@ #include "Tool.h" #include "DataModel.h" - +#include "MulticastWorkerMonitoring.h" /** * \class MulticastWorkers @@ -20,19 +20,26 @@ // class for things passed to multicast worker threads struct MulticastJobStruct { - MulticastJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; - Pool* m_pool; + MulticastJobStruct(Pool* pool, DataModel* data, MulticastWorkerMonitoring* mon) : m_pool(pool), m_data(data), monitoring_vars(mon){}; DataModel* m_data; + MulticastWorkerMonitoring* monitoring_vars; + Pool* m_pool; + std::string m_job_name; std::vector* msg_buffer; - std::string out_buffer; + std::string* out_buffer; + std::string logging_buffer; + std::string monitoring_buffer; + std::string rootplot_buffer; + std::string plotlyplot_buffer; }; struct MulticastJobDistributor_args : Thread_args { DataModel* m_data; + MulticastWorkerMonitoring* monitoring_vars; std::vector*> local_msg_queue; // swap with datamodel and then pass out to jobs - Pool job_struct_pool(true, 1000, 100); ///< pool for job objects used by worker threads + Pool job_struct_pool{true, 1000, 100}; ///< pool for job objects used by worker threads }; @@ -45,10 +52,11 @@ class MulticastWorkers: public Tool { bool Finalise(); ///< Finalise funciton used to clean up resorces. 
private: - static bool Thread(Thread_args* args); ///< job distributor thread function that pulls batches of multicast messages from upstream and passes them to the job queue + static void Thread(Thread_args* args); ///< job distributor thread function that pulls batches of multicast messages from upstream and passes them to the job queue MulticastJobDistributor_args thread_args; ///< args for the child thread that produces and distributes jobs to the worker farm + MulticastWorkerMonitoring monitoring_vars; - static void MulticastMessageJob(void*& arg); ///< job function that prepares a batch of multicast messages for DB entry + static bool MulticastMessageJob(void*& arg); ///< job function that prepares a batch of multicast messages for DB entry static void MulticastMessageFail(void*& arg); ///< job fail function, perform cleanup to return multicast buffer and job args struct to their respective Pools // for now use shared ones in datamodel diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index 718a107..3a8179b 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -1,16 +1,15 @@ -#include "ReadReceiverReplySender.h" +#include "ReadQueryReceiverReplySender.h" -ReadReceiverReplySender::ReadReceiverReplySender():Tool(){} +ReadQueryReceiverReplySender::ReadQueryReceiverReplySender():Tool(){} //FIXME call it readqueryreceviverandreplysender -bool ReadReceiverReplySender::Initialise(std::string configfile, DataModel &data){ +bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - 
if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; /* ----------------------------------------- */ @@ -22,20 +21,22 @@ bool ReadReceiverReplySender::Initialise(std::string configfile, DataModel &data int rcv_timeout_ms=500; int snd_timeout_ms=500; int poll_timeout_ms=500; - int rcv_hwm=10000; + int rcv_hwm=10000; // FIXME sufficient? int conns_backlog=1000; // FIXME sufficient? int local_buffer_size = 200; int transfer_period_ms = 200; - m_variables.Get("snd_timeout",snd_timeout_ms); - m_variables.Get("rcv_timeout",rcv_timeout_ms); - m_variables.Get("poll_timeout_ms",poll_timeout_ms); m_variables.Get("port_name", port_name); m_variables.Get("rcv_hwm", rcv_hwm); // max num outstanding messages in receive buffer m_variables.Get("conns_backlog", conns_backlog); // max num oustanding connection requests + m_variables.Get("poll_timeout_ms",poll_timeout_ms); + m_variables.Get("snd_timeout_ms",snd_timeout_ms); + m_variables.Get("rcv_timeout_ms",rcv_timeout_ms); m_variables.Get("local_buffer_size", local_buffer_size); m_variables.Get("transfer_period_ms", transfer_period_ms); + ExportConfiguration(); + /* ----------------------------------------- */ /* Socket Setup */ /* ----------------------------------------- */ @@ -67,30 +68,31 @@ bool ReadReceiverReplySender::Initialise(std::string configfile, DataModel &data } */ - // make items to poll the input and output sockets + // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients + std::unique_lock locker(m_data->managed_sockets_mtx); + m_data->managed_sockets[port_name] = managed_socket; /* ----------------------------------------- */ /* Thread Setup */ /* ----------------------------------------- */ + // monitoring struct to encapsulate tracking info + locker =std::unique_lock(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; + thread_args.m_tool_name = m_tool_name; + 
thread_args.monitoring_vars = &monitoring_vars; thread_args.socket = managed_socket->socket; // FIXME get from struct. - thread_args.socket_mtx = managed_socket->socket_mtx; // FIXME get from struct. For sharing socket with SocketManager + thread_args.socket_mtx = &managed_socket->socket_mtx; // FIXME get from struct. For sharing socket with SocketManager thread_args.poll_timeout_ms = poll_timeout_ms; - thread_args.polls.emplace_back(*socket,0,ZMQ_POLLIN,0); - thread_args.polls.emplace_back(*socket,0,ZMQ_POLLOUT,0); - thread_args.in_local_queue = m_data->querybatch_pool.GetNew(); - thread_args.in_local_queue.reserve(local_buffer_size); + thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLIN,0); + thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLOUT,0); + thread_args.in_local_queue = m_data->querybatch_pool.GetNew(local_buffer_size); + thread_args.make_new = true; thread_args.local_buffer_size = local_buffer_size; - thread_args.transfer_period_ms = transfer_period_ms; + thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; - // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients - std::unique_lock locker(m_data->managed_sockets_mtx); - m_data->managed_sockets[port_name] = managed_socket; - locker.unlock(); - - m_args->in_local_queue = m_data->rdmsg_buffer_pool.GetNew(local_buffer_size); - thread_args.make_new = true; m_data->utils.CreateThread("readrep_sendreceiver", &Thread, &thread_args); // thread needs a unique name m_data->num_threads++; @@ -98,34 +100,25 @@ bool ReadReceiverReplySender::Initialise(std::string configfile, DataModel &data } -bool ReadReceiverReplySender::Execute(){ +bool ReadQueryReceiverReplySender::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
- ++m_data->read_rcv_thread_crashes; - } - - if(m_data->managed_sockets.count(port_name)){ - std::unique_lock lock(m_data->managed_sockets_mtx); - ManagedSocket* sock = m_data->managed_sockets[port_name]; - m_data->managed_sockets.erase(port_name); - locker.unlock(); - if(sock->socket) delete sock->socket; // destructor closes socket - delete sock; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++(monitoring_vars.thread_crashes); } return true; } -bool ReadReceiverReplySender::Finalise(){ +bool ReadQueryReceiverReplySender::Finalise(){ // signal background receiver thread to stop Log(m_tool_name+": Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); + std::cerr<<"ReadReceiver thread terminated"<num_threads--; // FIXME ensure we don't interfere with SocketManager? Better to leave that to do deletion in its destructor? @@ -137,35 +130,48 @@ bool ReadReceiverReplySender::Finalise(){ } */ + if(m_data->managed_sockets.count(port_name)){ + std::unique_lock locker(m_data->managed_sockets_mtx); + ManagedSocket* sock = m_data->managed_sockets[port_name]; + m_data->managed_sockets.erase(port_name); + locker.unlock(); + if(sock->socket) delete sock->socket; // destructor closes socket + delete sock; + } + + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» -void ReadReceiverReplySender::Thread(Thread_args* args){ +void ReadQueryReceiverReplySender::Thread(Thread_args* args){ - ReadReceiverReplySender_args* m_args = reinterpret_cast(args); + ReadQueryReceiverReplySender_args* m_args = reinterpret_cast(args); // transfer to datamodel // ===================== - if(m_args->in_local_queue.size() >= m_args->local_buffer_size || - (m_args->last_transfer - std::chrononow()) > 
transfer_period_ms){ + if(m_args->in_local_queue->queries.size() >= m_args->local_buffer_size || + (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms){ - if(!make_new) pop_back(); - if(!m_args->in_local_queue.empty()){ + if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); + if(!m_args->in_local_queue->queries.empty()){ + + std::clog<m_tool_name<<": added "<in_local_queue->queries.size() + <<" messages to datamodel"< locker(m_args->m_data->read_msg_queue_mtx); m_args->m_data->read_msg_queue.push_back(m_args->in_local_queue); locker.unlock(); - m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(); - m_args->in_local_queue.reserve(m_args->local_buffer_size); + m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(m_args->local_buffer_size); - m_args->m_data->Log(m_tool_name+": added "+std::to_string(next_index) - +" messages to datamodel",5); // FIXME better logging - m_args->last_transfer = std::chrononow(); + m_args->last_transfer = std::chrono::steady_clock::now(); m_args->make_new=true; - ++(*m_args->m_data->readrep_in_buffer_transfers); + ++(m_args->monitoring_vars->in_buffer_transfers); } } @@ -173,125 +179,165 @@ void ReadReceiverReplySender::Thread(Thread_args* args){ // poll // ==== try { - std::unique_lock lock(m_args->socket_mtx); - get_ok = zmq::poll(&m_args->polls, 2, m_args->poll_timeout_ms); + m_args->get_ok=0; + std::unique_lock locker(*m_args->socket_mtx); + m_args->get_ok = zmq::poll(m_args->polls.data(), 2, m_args->poll_timeout_ms); } catch(zmq::error_t& err){ // ignore poll aborting due to signals - if(zmq_errno()==EINTR) return; - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->readrep_polls_failed); + if(zmq_errno()==EINTR) return; // this is probably fine + //std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? 
or just globally via restarting thread? or throw? + return; + } + catch(std::exception& err){ + std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; - } // FIXME catch non-zmq errors? can we handle them any better? - catch(...){ - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->readrep_polls_failed); + } catch(...){ + std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; } - if(get_ok<0){ - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->readrep_polls_failed); + if(m_args->get_ok<0){ + std::cerr<m_tool_name<<" poll failed with "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
return; } // read // ==== if(m_args->polls[0].revents & ZMQ_POLLIN){ - m_data->Log(">>> got a read query from client",3); // FIXME better logging + std::clog<m_tool_name<<" receiving message"<make_new){ - m_args->in_local_queue.emplace_back(); + m_args->in_local_queue->queries.emplace_back(); m_args->make_new = false; } - ZmqQuery& msg_buf = m_args->in_local_queue.back().queries; - msg_buf.resize(4); + ZmqQuery& msg_buf = m_args->in_local_queue->queries.back(); + msg_buf.parts.resize(4); // received parts are [client, topic, msg_id, query] // reorder parts on receipt as client and msg_id will be left untouched and re-used for response static constexpr char part_order[4] = {0,2,1,3}; - std::unique_lock locker(m_args->socket_mtx); - for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); - if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; - } - - // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message - if(m_args->get_ok && msg_buf[3].more()){ - while(true){ - m_args->socket->recv(&m_args->msg_discard); - ++m_args->msg_parts; + try { + + std::unique_lock locker(*m_args->socket_mtx); + for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ + m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); + if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; } - } - locker.unlock(); - - // if the read failed, discard the message - if(!m_args->get_ok){ - std::cerr<msg_parts<m_data->readrep_rcv_fails); - return; - } - - // if there weren't 4 parts, discard the message - if(m_args->msg_parts!=4){ - std::cerr<msg_parts<<" part message"<m_data->readrep_bad_msgs); - return; + locker.unlock(); + + // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message + if(m_args->get_ok && msg_buf[3].more()){ + while(true){ + 
m_args->socket->recv(&m_args->msg_discard); + ++m_args->msg_parts; + } + } + + // if the read failed, discard the message + if(!m_args->get_ok){ + + std::cerr<m_tool_name<<" receive failed with "<monitoring_vars->rcv_fails); + + // if there weren't 4 parts, discard the message + } else if(m_args->msg_parts!=4){ + + std::cerr<m_tool_name<<": Unexpected "<msg_parts<<" part message"<monitoring_vars->bad_msgs); + + // else success + } else { + + m_args->make_new=true; + ++(m_args->monitoring_vars->msgs_rcvd); + + } + + } catch(zmq::error_t& err){ + // receive aborted due to signals? + if(zmq_errno()==EINTR) return; // FIXME this is probably not appropriate: should resume receive? + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + } catch(std::exception& err){ + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + } catch(...){ + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? } - // else success - m_args->make_new=true; - ++(*m_args->m_data->readrep_msgs_rcvd); - } // else no messages from clients // write // ===== - m_args->m_data->Log("Size of reply queue is "+ - (m_args->out_local_queue ? std::to_string(m_args->out_local_queue.size()) : std::string{"0"}),10); // FIXME + //m_args->m_data->Log("Size of reply queue is "+ + // (m_args->out_local_queue ? 
std::to_string(m_args->out_local_queue.size()) : std::string{"0"}),10); // FIXME // send next response message, if we have one in the queue - if(!m_args->out_local_queue!=nullptr && m_args->out_iout_local_queue->queries.size()){ + if(m_args->out_local_queue!=nullptr && m_args->out_iout_local_queue->queries.size()){ // check we had a listener ready if(m_args->polls[1].revents & ZMQ_POLLOUT){ + std::clog<m_tool_name<<" sending reply"<out_local_queue->queries[m_args->out_i++]; // FIXME maybe don't pop (increment out_i) until send succeeds? // FIXME maybe impelement 'retries' mechanism as previously? - std::unique_lock locker(m_args->socket_mtx); try { + + std::unique_lock locker(*m_args->socket_mtx); for(size_t i=0; iget_ok = m_args->socket->send(rep[i], ZMQ_SNDMORE); if(!m_args->get_ok) break; } if(m_args->get_ok) m_args->get_ok = m_args->socket->send(rep[rep.size()-1]); - } catch(zmq::exception_t& e){ - std::cerr<get_ok){ - // remove from the to-send queue - ++(*m_args->m_data->readrep_reps_sent); - //m_args->out_local_queue.pop_front(); // FIXME if we didn't do it before + locker.unlock(); - } else { - std::cerr<m_data->readrep_rep_send_fails); // FIXME or move into below if we retry? or track both? - /* - if(next_msg.retries>=max_send_attempts){ - resp_queue.erase(resp_queue.begin()->first); - } else { - ++next_msg.retries; + if(!m_args->get_ok){ + std::cerr<m_tool_name<<": send failed with "<monitoring_vars->send_fails); // FIXME or move into below if we retry? or track both? + /* + if(next_msg.retries>=max_send_attempts){ + resp_queue.erase(resp_queue.begin()->first); + } else { + ++next_msg.retries; + } + */ + return; } - */ + + // else success + ++(m_args->monitoring_vars->msgs_sent); + + } catch(zmq::error_t& err){ + // send aborted due to signals? + if(zmq_errno()==EINTR) return; // FIXME is this appropriate here? + std::cerr<m_tool_name<<" send caught "<monitoring_vars->send_fails); +// m_args->running=false; // FIXME Handle other errors? 
or just globally via restarting thread? or throw? + } catch(std::exception& e){ + std::cerr<m_tool_name<<" send caught "<monitoring_vars->send_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + } catch(...){ + std::cerr<m_tool_name<<" send caught "<monitoring_vars->send_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? } } // else no available listeners @@ -302,6 +348,8 @@ void ReadReceiverReplySender::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->query_replies_mtx); if(!m_args->m_data->query_replies.empty()){ + std::clog<m_tool_name<<": fetching new replies"<out_local_queue!=nullptr){ m_args->m_data->querybatch_pool.Add(m_args->out_local_queue); @@ -312,15 +360,14 @@ void ReadReceiverReplySender::Thread(Thread_args* args){ m_args->out_local_queue = m_args->m_data->query_replies.front(); m_args->m_data->query_replies.pop_front(); - *(m_args->m_data->readrep_out_buffer_transfers; + ++(m_args->monitoring_vars->out_buffer_transfers); // start sending from the beginning m_args->out_i=0; } locker.unlock(); - } - return true; + return; } diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h index a9f4629..a179a4a 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h @@ -1,14 +1,14 @@ -#ifndef ReadReceiverReplySender_H -#define ReadReceiverReplySender_H +#ifndef ReadQueryReceiverReplySender_H +#define ReadQueryReceiverReplySender_H #include #include "Tool.h" #include "DataModel.h" - +#include "ReadReceiveMonitoring.h" /** - * \class ReadReceiverReplySender + * \class ReadQueryReceiverReplySender * * This Tool gets read queries from a ZMQ ROUTER socket and send replies as well as write query acknowledgements. 
* @@ -17,9 +17,11 @@ * Contact: marcus.o-flaherty@warwick.ac.uk */ -struct ReadReceiverReplySender_args : public Thread_args { +struct ReadQueryReceiverReplySender_args : public Thread_args { + std::string m_tool_name; DataModel* m_data; + ReadReceiveMonitoring* monitoring_vars; zmq::socket_t* socket=nullptr; std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients @@ -41,17 +43,18 @@ struct ReadReceiverReplySender_args : public Thread_args { }; -class ReadReceiverReplySender: public Tool { +class ReadQueryReceiverReplySender: public Tool { public: - ReadReceiverReplySender(); ///< Simple constructor + ReadQueryReceiverReplySender(); ///< Simple constructor bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. bool Execute(); ///< Executre function used to perform Tool perpose. bool Finalise(); ///< Finalise funciton used to clean up resorces. 
private: static void Thread(Thread_args* args); - ReadReceiverReplySender_args thread_args; + ReadQueryReceiverReplySender_args thread_args; + ReadReceiveMonitoring monitoring_vars; std::string port_name; // name by which clients advertise sockets for sending read queries to the DB diff --git a/UserTools/ReadQueryReceiverReplySender/ReadReceiveMonitoring.h b/UserTools/ReadQueryReceiverReplySender/ReadReceiveMonitoring.h new file mode 100644 index 0000000..d92b7f8 --- /dev/null +++ b/UserTools/ReadQueryReceiverReplySender/ReadReceiveMonitoring.h @@ -0,0 +1,38 @@ +#ifndef ReadReceiveMonitoring_H +#define ReadReceiveMonitoring_H + +#include "MonitoringVariables.h" + +class ReadReceiveMonitoring : public MonitoringVariables { + public: + ReadReceiveMonitoring(){}; + ~ReadReceiveMonitoring(){}; + + std::atomic polls_failed; // error polling socket + std::atomic rcv_fails; // error in recv_from + std::atomic send_fails; // error in send_to + std::atomic msgs_rcvd; // messages successfully received + std::atomic msgs_sent; // messages successfully received + std::atomic bad_msgs; // messages with the wrong number of zmq parts + std::atomic in_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic out_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"polls_failed\":"+std::to_string(polls_failed.load()) + +",\"rcv_fails\":"+std::to_string(rcv_fails.load()) + +",\"send_fails\":"+std::to_string(send_fails.load()) + +",\"msgs_rcvd\":"+std::to_string(msgs_rcvd.load()) + +",\"bad_msgs\":"+std::to_string(bad_msgs.load()) + +",\"msgs_sent\":"+std::to_string(msgs_sent.load()) + +",\"in_buffer_transfers\":"+std::to_string(in_buffer_transfers.load()) + +",\"out_buffer_transfers\":"+std::to_string(out_buffer_transfers.load()) + 
+",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/ResultWorkers/ResultWorkerMonitoring.h b/UserTools/ResultWorkers/ResultWorkerMonitoring.h new file mode 100644 index 0000000..91228d5 --- /dev/null +++ b/UserTools/ResultWorkers/ResultWorkerMonitoring.h @@ -0,0 +1,33 @@ +#ifndef ResultWorkerMonitoring_H +#define ResultWorkerMonitoring_H + +#include "MonitoringVariables.h" + +class ResultWorkerMonitoring : public MonitoringVariables { + public: + ResultWorkerMonitoring(){}; + ~ResultWorkerMonitoring(){}; + + std::atomic read_batches_processed; + std::atomic write_batches_processed; + std::atomic jobs_failed; + std::atomic jobs_completed; + std::atomic result_access_errors; + + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"read_batches_processed\":"+std::to_string(read_batches_processed.load()) + +",\"write_batches_processed\":"+std::to_string(write_batches_processed.load()) + +",\"result_access_errors\":"+std::to_string(result_access_errors.load()) + +",\"jobs_completed\":"+std::to_string(jobs_completed.load()) + +",\"jobs_failed\":"+std::to_string(jobs_failed.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index 429572d..8225401 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -5,15 +5,21 @@ ResultWorkers::ResultWorkers():Tool(){} bool ResultWorkers::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) 
m_verbose=1; + ExportConfiguration(); + + // monitoring struct to encapsulate tracking info + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; m_data->utils.CreateThread("result_job_distributor", &Thread, &thread_args); m_data->num_threads++; @@ -28,8 +34,8 @@ bool ResultWorkers::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? - ++m_data->result_job_distributor_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++(monitoring_vars.thread_crashes); } return true; @@ -41,9 +47,12 @@ bool ResultWorkers::Finalise(){ // signal job distributor thread to stop Log(m_tool_name+": Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); m_data->num_threads--; + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } @@ -63,13 +72,14 @@ void ResultWorkers::Thread(Thread_args* args){ for(int i=0; ilocal_msg_queue.size(); ++i){ // add a new Job to the job queue to process this data - Job* the_job = job_pool.GetNew("result_worker"); + Job* the_job = m_args->m_data->job_pool.GetNew("result_worker"); + the_job->out_pool = &m_args->m_data->job_pool; if(the_job->data == nullptr){ // on first creation of the job, make it a JobStruct to encapsulate its data // N.B. 
Pool::GetNew will only invoke the constructor if this is a new instance, // (not if it's been used before and then returned to the pool) // so don't pass job-specific variables to the constructor - the_job->data = job_struct_pool.GetNew(&job_struct_pool, m_args->m_data); + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data, m_args->monitoring_vars); } else { // FIXME error std::cerr<<"result_worker Job with non-null data pointer!"<func = ResultJob; the_job->fail_func = ResultJobFail; - ResultJobStruct* job_data = dynamic_cast(the_job->data); + ResultJobStruct* job_data = static_cast(the_job->data); job_data->batch = m_args->local_msg_queue[i]; + job_data->m_job_name = "result_worker"; - /*ok =*/ m_args->m_data->job_queue.AddJob(the_job); // just checks if you've defined func and first_vals = true; + m_args->m_data->job_queue.AddJob(the_job); } @@ -89,7 +100,7 @@ void ResultWorkers::Thread(Thread_args* args){ // maybe we can generalise to setreply if needed, depending on reply format & batching of read queries // or do we just do this in the connection / reply sender thread(s)? - return true; + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» @@ -98,6 +109,7 @@ void ResultWorkers::ResultJobFail(void*& arg){ // safety check in case the job somehow fails after returning its args to the pool if(arg==nullptr){ + std::cerr<<"multicast worker fail with no args"<(arg); - ++(*m_args->m_data->result_worker_job_fails); + std::cerr<m_job_name<<" failure"<monitoring_vars->jobs_failed); // return our job args to the pool - m_args->m_pool.Add(m_args); + m_args->m_pool->Add(m_args); m_args = nullptr; // clear the local m_args variable... 
not strictly necessary arg = nullptr; // clear the job 'data' member variable + return; } -void ResultWorkers::ResultJob(void*& arg){ +bool ResultWorkers::ResultJob(void*& arg){ ResultJobStruct* m_args = reinterpret_cast(arg); @@ -131,56 +145,86 @@ void ResultWorkers::ResultJob(void*& arg){ for(ZmqQuery& query : m_args->batch->queries){ // set whether the query succeeded or threw an exception - query.setsuccess(uint32_t succeeded); // FIXME FILL - - // returned rows are sent back formatted as JSON, with each row a new zmq::message_t - // resize zmq vector in preparation - query.setresponserows(std::size(query.result)); - - if(query.topic()[2]!=query_topic::generic){ + if(query.result.query().empty()){ // FIXME not sure if this is a good check necessarily, esp w/pipelining? - // standard queries generated by the libDAQInterface use `row_to_json` - // to request results already packaged up into one JSON per row - // so all we need to do is copy that into the zmq message - for(size_t i=0; itmpval = "{"; - for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; - m_args->tmpval += "\"" + it->name() + "\":"; - // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' - // but to convert this into JSON, strings need to be quoted: - // i.e. 
{ "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } - // this means we need to add enclosing quotes *only* for string fields - if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ - m_args->tmpval += "\""+it->c_str()+"\""; - } else { - m_args->tmpval += it->c_str(); + // just for good measure, when we try to access the pqxx result, + // enclose within try just in case it throws something + try { + // standard queries generated by the libDAQInterface use `row_to_json` + // to request results already packaged up into one JSON per row + // so all we need to do is copy that into the zmq message + for(size_t i=0; imonitoring_vars->result_access_errors); } - m_args->tmpval += "}"; - query.setresponse(i, m_args->tmpval); + } else { - } + // just for good measure, when we try to access the pqxx result, + // enclose within try just in case it throws something + try { + // TODO if we can safely shoehorn in a wrapping call to `row_to_json` + // around a user's generic sql, we can combine this with the above. + // But, given the arbitrary complexity of statements, this may not be possible. + // in which case, we need to loop over rows and convert them to JSON manually + for(size_t i=0; itmpval = "{"; + for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; + m_args->tmpval += "\"" + std::string{it->name()} + "\":"; + // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' + // but to convert this into JSON, strings need to be quoted: + // i.e. 
{ "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } + // this means we need to add enclosing quotes *only* for string fields + if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ + m_args->tmpval += "\""+std::string{it->c_str()}+"\""; + } else { + m_args->tmpval += it->c_str(); + } + } + m_args->tmpval += "}"; + + query.setresponse(i, m_args->tmpval); + } + + } catch (std::exception& e){ + std::cerr<<"caught "<monitoring_vars->result_access_errors); + } + + } // generic query, manual json formation rom fields + + // release pqxx::result query.result.clear(); - } - + } // if we had a result object } // loop over queries in this batch + ++(m_args->monitoring_vars->read_batches_processed); + } else { // process batch of write queries @@ -198,7 +242,7 @@ void ResultWorkers::ResultJob(void*& arg){ for(ZmqQuery& query : m_args->batch->queries){ - switch(query.topic()[2]){ + switch(query_topic{query.topic()[2]}){ // alarms return just the success status case query_topic::alarm: query.setsuccess(m_args->batch->alarm_batch_success); @@ -210,7 +254,7 @@ void ResultWorkers::ResultJob(void*& arg){ query.setsuccess(devconfigs_ok); if(devconfigs_ok){ query.setresponserows(1); - query.setresponse(0, devconfig_version_nums[devconfig_i++]); + query.setresponse(0, m_args->batch->devconfig_version_nums[devconfig_i++]); } break; @@ -218,7 +262,7 @@ void ResultWorkers::ResultJob(void*& arg){ query.setsuccess(runconfigs_ok); if(runconfigs_ok){ query.setresponserows(1); - query.setresponse(0, runconfig_version_nums[runconfig_i++]); + query.setresponse(0, m_args->batch->runconfig_version_nums[runconfig_i++]); } break; @@ -226,7 +270,7 @@ void ResultWorkers::ResultJob(void*& arg){ query.setsuccess(calibrations_ok); if(calibrations_ok){ query.setresponserows(1); - query.setresponse(0, calibration_version_nums[calibration_i++]); + query.setresponse(0, m_args->batch->calibration_version_nums[calibration_i++]); } break; @@ -234,7 +278,7 @@ void 
ResultWorkers::ResultJob(void*& arg){ query.setsuccess(plotlyplots_ok); if(plotlyplots_ok){ query.setresponserows(1); - query.setresponse(0, plotlyplot_version_nums[plotlyplot_i++]); + query.setresponse(0, m_args->batch->plotlyplot_version_nums[plotlyplot_i++]); } break; @@ -242,17 +286,62 @@ void ResultWorkers::ResultJob(void*& arg){ query.setsuccess(rootplots_ok); if(rootplots_ok){ query.setresponserows(1); - query.setresponse(0, rootplot_version_nums[rootplot_i++]); + query.setresponse(0, m_args->batch->rootplot_version_nums[rootplot_i++]); + } + break; + + case query_topic::generic: + // just for good measure, when we try to access the pqxx result, + // enclose within try just in case it throws something + try { + // TODO if we can safely shoehorn in a wrapping call to `row_to_json` + // around a user's generic sql, we can combine this with the above. + // But, given the arbitrary complexity of statements, this may not be possible. + // in which case, we need to loop over rows and convert them to JSON manually + for(size_t i=0; itmpval = "{"; + for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; + m_args->tmpval += "\"" + std::string{it->name()} + "\":"; + // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' + // but to convert this into JSON, strings need to be quoted: + // i.e. { "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } + // this means we need to add enclosing quotes *only* for string fields + if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ + m_args->tmpval += "\""+std::string{it->c_str()}+"\""; + } else { + m_args->tmpval += it->c_str(); + } + } + m_args->tmpval += "}"; + + query.setresponse(i, m_args->tmpval); + } + + } catch (std::exception& e){ + std::cerr<<"caught "<monitoring_vars->result_access_errors); } break; default: // FIXME corrupted topic, log it. 
+ //std::cerr<monitoring_vars->write_batches_processed); + } // if/else on whether this batch was read/write // } // loop over query batches @@ -264,14 +353,15 @@ void ResultWorkers::ResultJob(void*& arg){ m_args->m_data->query_replies.push_back(m_args->batch); locker.unlock(); - ++(*m_args->m_data->result_worker_job_successes); + std::cerr<m_job_name<<" completed"<monitoring_vars->jobs_completed); // return our job args to the pool - m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args->m_pool->Add(m_args); // return our job args to the job args struct pool m_args = nullptr; // clear the local m_args variable... not strictly necessary arg = nullptr; // clear the job 'data' member variable - return; + return true; } diff --git a/UserTools/ResultWorkers/ResultWorkers.h b/UserTools/ResultWorkers/ResultWorkers.h index 0029cc9..9a9679d 100644 --- a/UserTools/ResultWorkers/ResultWorkers.h +++ b/UserTools/ResultWorkers/ResultWorkers.h @@ -6,7 +6,7 @@ #include "Tool.h" #include "DataModel.h" - +#include "ResultWorkerMonitoring.h" /** * \class ResultWorkers @@ -21,9 +21,11 @@ // class for things passed to result worker threads struct ResultJobStruct { - ResultJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; + ResultJobStruct(Pool* pool, DataModel* data, ResultWorkerMonitoring* mon) : m_pool(pool), m_data(data), monitoring_vars(mon){}; DataModel* m_data; + ResultWorkerMonitoring* monitoring_vars; Pool* m_pool; + std::string m_job_name; QueryBatch* batch; std::stringstream ss; std::string tmpval; @@ -33,8 +35,9 @@ struct ResultJobStruct { struct ResultJobDistributor_args : Thread_args { DataModel* m_data; + ResultWorkerMonitoring* monitoring_vars; std::vector local_msg_queue; // swap with datamodel and then pass out to jobs - Pool job_struct_pool(true, 1000, 100); ///< pool for job args structs // FIXME default args + Pool job_struct_pool{true, 1000, 100}; ///< pool for job args structs // FIXME default args }; @@ -49,8 
+52,9 @@ class ResultWorkers: public Tool { private: static void Thread(Thread_args* args); ResultJobDistributor_args thread_args; ///< args for the child thread that makes jobs for the job queue + ResultWorkerMonitoring monitoring_vars; - static void ResultJob(void*& arg); + static bool ResultJob(void*& arg); static void ResultJobFail(void*& args); }; diff --git a/UserTools/SocketManager/SocketManager.cpp b/UserTools/SocketManager/SocketManager.cpp index 4ead4f4..afcf3e6 100644 --- a/UserTools/SocketManager/SocketManager.cpp +++ b/UserTools/SocketManager/SocketManager.cpp @@ -5,19 +5,29 @@ SocketManager::SocketManager():Tool(){} bool SocketManager::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; int update_ms=2000; - m_variables.Get("verbose",update_ms); + m_variables.Get("update_ms",update_ms); + + ExportConfiguration(); + + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + + // for doing UpdateConnections + daq_utils = DAQUtilities(m_data->context); thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; + thread_args.daq_utils = &daq_utils; thread_args.update_period_ms = std::chrono::milliseconds{update_ms}; - thread_args.last_update = std::chrononow(); + thread_args.last_update = std::chrono::steady_clock::now(); + m_data->utils.CreateThread("socket_manager", &Thread, &thread_args); m_data->num_threads++; @@ -27,11 +37,11 @@ bool SocketManager::Initialise(std::string configfile, DataModel &data){ bool SocketManager::Execute(){ - if(!thread_args->running){ + if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should 
we give up if Initialise returns false? should we set StopLoop to 1? - ++m_data->socket_manager_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++(monitoring_vars.thread_crashes); } return true; @@ -43,9 +53,12 @@ bool SocketManager::Finalise(){ // signal job distributor thread to stop Log(m_tool_name+": Joining socket manager thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); m_data->num_threads--; + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } @@ -53,21 +66,23 @@ void SocketManager::Thread(Thread_args* args){ SocketManager_args* m_args = dynamic_cast(args); - m_args->last_update = std::chrononow(); - m_args->m_data->Log("checking for new clients",22); //FIXME + m_args->last_update = std::chrono::steady_clock::now(); + //m_args->m_data->Log("checking for new clients",22); //FIXME bool new_clients=false; - std::unique_lock container_locker(managed_sockets_mtx); - for(SocketConnection* sock : m_data->managed_sockets){ + std::unique_lock container_locker(m_args->m_data->managed_sockets_mtx); + for(std::pair mgd_sock : m_args->m_data->managed_sockets){ + + ManagedSocket* sock = mgd_sock.second; std::unique_lock locker(sock->socket_mtx); - int new_conn_count = (sock->connections.size() - m_args->m_util->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->port_name)); + int new_conn_count = (sock->connections.size() - m_args->daq_utils->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->port_name)); locker.unlock(); if(new_conn_count!=0){ new_clients = true; - m_args->m_data->services->SendLog(m_tool_name+": "+std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); + 
//m_args->m_data->services->SendLog(m_tool_name+": "+std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); // FIXME logging // update the list of clients so they can be queried for(std::pair& aservice : sock->connections){ diff --git a/UserTools/SocketManager/SocketManager.h b/UserTools/SocketManager/SocketManager.h index 83dab3b..5af5dd9 100644 --- a/UserTools/SocketManager/SocketManager.h +++ b/UserTools/SocketManager/SocketManager.h @@ -5,6 +5,7 @@ #include "Tool.h" #include "DataModel.h" +#include "SocketManagerMonitoring.h" /** * \class SocketManager @@ -19,9 +20,11 @@ struct SocketManager_args : public Thread_args { DataModel* m_data; + SocketManagerMonitoring* monitoring_vars; + DAQUtilities* daq_utils; std::map clientsmap; - std::map> last_update; + std::chrono::time_point last_update; std::chrono::milliseconds update_period_ms; }; @@ -35,8 +38,10 @@ class SocketManager: public Tool { bool Finalise(); ///< Finalise function used to clean up resources. 
private: + DAQUtilities daq_utils{nullptr}; static void Thread(Thread_args* args); SocketManager_args thread_args; + SocketManagerMonitoring monitoring_vars; }; diff --git a/UserTools/SocketManager/SocketManagerMonitoring.h b/UserTools/SocketManager/SocketManagerMonitoring.h new file mode 100644 index 0000000..d82875d --- /dev/null +++ b/UserTools/SocketManager/SocketManagerMonitoring.h @@ -0,0 +1,23 @@ +#ifndef SocketManagerMonitoring_H +#define SocketManagerMonitoring_H + +#include "MonitoringVariables.h" + +class SocketManagerMonitoring : public MonitoringVariables { + public: + SocketManagerMonitoring(){}; + ~SocketManagerMonitoring(){}; + + // TODO add more monitoring + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/Unity.h b/UserTools/Unity.h index a51bdce..d46c484 100644 --- a/UserTools/Unity.h +++ b/UserTools/Unity.h @@ -1,13 +1,15 @@ #include -#include "MulticastReceiver.h" +#include "MulticastReceiverSender.h" #include "MulticastWorkers.h" #include "DatabaseWorkers.h" -#include "QueueTrimmer.h" #include "WriteQueryReceiver.h" #include "ReadQueryReceiverReplySender.h" #include "WriteWorkers.h" -#include "MiddlemanNegotiate.h" #include "Monitoring.h" #include "SocketManager.h" #include "ResultWorkers.h" #include "JobManager.h" +/* +#include "QueueTrimmer.h" +#include "MiddlemanNegotiate.h" +*/ diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp index 5fc7b7d..d7c9b7b 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp @@ -5,32 +5,37 @@ WriteQueryReceiver::WriteQueryReceiver():Tool(){} bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") 
m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - /* ----------------------------------------- */ /* Configuration */ /* ----------------------------------------- */ - bool am_master = true; // FIXME not sure being used any more +// am_master = true; // FIXME not sure being used any more m_verbose=1; port_name = "db_write"; // FIXME do these timeouts need to be << transfer_period_ms? - int rcv_timeout_ms = 500; int poll_timeout_ms = 500; + int rcv_timeout_ms = 500; int transfer_period_ms = 200; int local_buffer_size = 200; + int rcv_hwm=10000; // FIXME sufficient? + int conns_backlog=1000; // FIXME sufficient? m_variables.Get("verbose",m_verbose); - m_variables.Get("rcv_timeout_ms",rcv_timeout_ms); - m_variables.Get("poll_timeout_ms",poll_timeout_ms); m_variables.Get("port_name", port_name); - m_variables.Get("am_master", am_master); + m_variables.Get("rcv_hwm", rcv_hwm); // max num outstanding messages in receive buffer + m_variables.Get("conns_backlog", conns_backlog); // max num oustanding connection requests + m_variables.Get("poll_timeout_ms",poll_timeout_ms); + m_variables.Get("rcv_timeout_ms",rcv_timeout_ms); m_variables.Get("local_buffer_size", local_buffer_size); m_variables.Get("transfer_period_ms", transfer_period_ms); +// m_variables.Get("am_master", am_master); + + ExportConfiguration(); /* ----------------------------------------- */ /* Socket Setup */ @@ -50,28 +55,33 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ // don't linger too long, it looks like the program crashed. managed_socket->socket->setsockopt(ZMQ_LINGER, 10); managed_socket->socket->setsockopt(ZMQ_SUBSCRIBE,"",0); - managed_socket->socket->setsockopt(ZMQ_RCVHWM,10000); // TODO are these sufficient? - managed_socket->socket->setsockopt(ZMQ_BACKLOG,1000); // TODO any other options? 
+ managed_socket->socket->setsockopt(ZMQ_RCVHWM,rcv_hwm); + managed_socket->socket->setsockopt(ZMQ_BACKLOG,conns_backlog); // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients std::unique_lock locker(m_data->managed_sockets_mtx); m_data->managed_sockets[port_name] = managed_socket; - locker.unlock(); /* ----------------------------------------- */ /* Thread Setup */ /* ----------------------------------------- */ + // monitoring struct to encapsulate tracking info + locker = std::unique_lock(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; + thread_args.m_tool_name = m_tool_name; + thread_args.monitoring_vars = &monitoring_vars; thread_args.socket = managed_socket->socket; - thread_args.socket_mtx = managed_socket->socket_mtx; // for sharing socket with SocketManager + thread_args.socket_mtx = &managed_socket->socket_mtx; // for sharing socket with SocketManager thread_args.poll_timeout_ms = poll_timeout_ms; - thread_args.poll = zmq::pollitem_t{NULL, socket, ZMQ_POLLIN, 0}; - thread_args.in_local_queue = m_data->querybatch_pool.GetNew(); - thread_args.in_local_queue.reserve(local_buffer_size); + thread_args.poll = zmq::pollitem_t{*managed_socket->socket, 0, ZMQ_POLLIN, 0}; + thread_args.in_local_queue = m_data->querybatch_pool.GetNew(local_buffer_size); thread_args.local_buffer_size = local_buffer_size; - thread_args.transfer_period_ms = transfer_period_ms; + thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; thread_args.make_new = true; + m_data->utils.CreateThread("write_query_receiver", &Thread, &thread_args); // thread needs a unique name m_data->num_threads++; @@ -84,13 +94,13 @@ bool WriteQueryReceiver::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? 
should we set StopLoop to 1? - ++m_data->pub_rcv_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++(monitoring_vars.thread_crashes); } /* FIXME are we doing this - if(m_data->am_master != am_master_last){ + if(am_master != am_master_last){ if(m_data->am_master) Promote(); else Demote(); } @@ -105,11 +115,11 @@ bool WriteQueryReceiver::Finalise(){ // signal background receiver thread to stop Log(m_tool_name+": Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); + std::cerr<<"WriteReceiver thread terminated"<num_threads--; if(m_data->managed_sockets.count(port_name)){ - std::unique_lock lock(m_data->managed_sockets_mtx); + std::unique_lock locker(m_data->managed_sockets_mtx); ManagedSocket* sock = m_data->managed_sockets[port_name]; m_data->managed_sockets.erase(port_name); locker.unlock(); @@ -117,6 +127,10 @@ bool WriteQueryReceiver::Finalise(){ delete sock; } + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } @@ -126,24 +140,24 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // transfer to datamodel // ===================== - if(m_args->in_local_queue->size() >= m_args->local_buffer_size || - (m_args->last_transfer - std::chrononow()) > transfer_period_ms){ + if(m_args->in_local_queue->queries.size() >= m_args->local_buffer_size || + (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms){ - if(!make_new) pop_back(); - if(!m_args->in_local_queue->empty()){ + if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); + if(!m_args->in_local_queue->queries.empty()){ + + std::clog<m_tool_name<<": adding "<in_local_queue->queries.size() + <<" messages to datamodel"< locker(m_args->m_data->write_msg_queue_mtx); 
m_args->m_data->write_msg_queue.push_back(m_args->in_local_queue); locker.unlock(); - m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(); - m_args->in_local_queue.reserve(m_args->local_buffer_size); + m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(m_args->local_buffer_size); - m_args->m_data->Log(m_tool_name+": added "+std::to_string(next_index) - +" messages to datamodel",5); // FIXME better logging - m_args->last_transfer = std::chrononow(); + m_args->last_transfer = std::chrono::steady_clock::now(); m_args->make_new=true; - ++(*m_args->m_data->write_buffer_transfers); + ++(m_args->monitoring_vars->in_buffer_transfers); } @@ -152,80 +166,115 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // poll // ==== try { - std::unique_lock lock(m_args->socket_mtx); + + std::unique_lock locker(*m_args->socket_mtx); m_args->get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); + + if(m_args->get_ok<0){ + std::cerr<m_tool_name<<" poll failed with "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; + } + } catch(zmq::error_t& err){ // ignore poll aborting due to signals if(zmq_errno()==EINTR) return; - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->write_polls_failed); + //std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; - } // FIXME catch non-zmq errors? can we handle them any better? - catch(...){ - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->write_polls_failed); + } catch(std::exception& err){ + std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? 
or just globally via restarting thread? or throw? return; - } - if(m_args->get_ok<0){ - std::cerr<running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? - ++(*m_args->m_data->write_polls_failed); + } catch(...){ + std::cerr<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; } // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - m_data->Log(m_tool_name+": got a write query from client",v_debug); + std::clog<m_tool_name<<": receiving message"<make_new){ - m_args->in_local_queue->emplace_back(); // FIXME we could resize(local_buffer_size) on retreive new + m_args->in_local_queue->queries.emplace_back(); // FIXME we could resize(local_buffer_size) on retreive new m_args->make_new = false; // then resize down to actual size on transfer out } - ZmqQuery& msg_buf = m_args->in_local_queue->back().queries; - msg_buf.resize(4); + ZmqQuery& msg_buf = m_args->in_local_queue->queries.back(); + msg_buf.parts.resize(4); // received parts are [topic, client, msg_id, query] // reorder parts on receipt as client and msg_id will be left untouched and re-used for response static constexpr char part_order[4] = {2,0,1,3}; - std::unique_lock locker(m_args->socket_mtx); - for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); - if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; - } - - // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message - if(m_args->get_ok && msg_buf[3].more()){ - while(true){ - m_args->socket->recv(&m_args->msg_discard); - ++m_args->msg_parts; + try { + + // receive expected 4 parts + std::unique_lock locker(*m_args->socket_mtx); + for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ + + m_args->get_ok = 
m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); + + if(!m_args->get_ok){ + std::cerr<m_tool_name<<" receive failed with "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + break; + } + + if(!msg_buf[part_order[m_args->msg_parts]].more()) break; + } - } - locker.unlock(); - - // if the read failed, discard the message - if(!m_args->get_ok){ - std::cerr<msg_parts<m_data->write_rcv_fails); + + // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message + if(m_args->get_ok && msg_buf[3].more()){ + while(true){ + m_args->socket->recv(&m_args->msg_discard); + ++m_args->msg_parts; + } + std::cerr<m_tool_name<<": Unexpected "<msg_parts<<" part message"<monitoring_vars->bad_msgs); + return; + } + + // if receive failed, discard the message + if(!m_args->get_ok){ + std::cerr<m_tool_name<<": receive failed with "<monitoring_vars->rcv_fails); + return; + } + + // else success + m_args->make_new=true; + ++(m_args->monitoring_vars->msgs_rcvd); + + } catch(zmq::error_t& err){ + // receive aborted due to signals? + if(zmq_errno()==EINTR) return; // FIXME is this appropriate here? + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; - } - - // if there weren't 4 parts, discard the message - if(m_args->msg_parts!=4){ - std::cerr<msg_parts<<" part message"<m_data->write_bad_msgs); + } catch(std::exception& err){ + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; + } catch(...){ + std::cerr<m_tool_name<<" receive caught "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
return; } - // else success - make_new=true; - ++(*m_args->m_data->write_msgs_rcvd); } // else no messages from clients diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h index f068d3e..31c176e 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h @@ -6,6 +6,7 @@ #include "Tool.h" #include "DataModel.h" +#include "WriteReceiveMonitoring.h" /** * \class WriteQueryReceiver @@ -20,7 +21,9 @@ struct WriteQueryReceiver_args : public Thread_args { + std::string m_tool_name; DataModel* m_data; + WriteReceiveMonitoring* monitoring_vars; zmq::socket_t* socket=nullptr; std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients @@ -49,6 +52,7 @@ class WriteQueryReceiver: public Tool { private: static void Thread(Thread_args* args); WriteQueryReceiver_args thread_args; + WriteReceiveMonitoring monitoring_vars; std::string port_name; // name by which clients advertise sockets for sending write queries to the DB diff --git a/UserTools/WriteQueryReceiver/WriteReceiveMonitoring.h b/UserTools/WriteQueryReceiver/WriteReceiveMonitoring.h new file mode 100644 index 0000000..1d6f5ab --- /dev/null +++ b/UserTools/WriteQueryReceiver/WriteReceiveMonitoring.h @@ -0,0 +1,32 @@ +#ifndef WriteReceiveMonitoring_H +#define WriteReceiveMonitoring_H + +#include "MonitoringVariables.h" + +class WriteReceiveMonitoring : public MonitoringVariables { + public: + WriteReceiveMonitoring(){}; + ~WriteReceiveMonitoring(){}; + + std::atomic polls_failed; // error polling socket + std::atomic rcv_fails; // error in recv_from + std::atomic msgs_rcvd; // messages successfully received + std::atomic bad_msgs; // messages with the wrong number of zmq parts + std::atomic in_buffer_transfers; // transfers of thread-local message vector to datamodel + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 
'running' was false) + + std::string toJSON(){ + + std::string s="{\"polls_failed\":"+std::to_string(polls_failed.load()) + +",\"rcv_fails\":"+std::to_string(rcv_fails.load()) + +",\"msgs_rcvd\":"+std::to_string(msgs_rcvd.load()) + +",\"bad_msgs\":"+std::to_string(bad_msgs.load()) + +",\"in_buffer_transfers\":"+std::to_string(in_buffer_transfers.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/WriteWorkers/WriteWorkerMonitoring.h b/UserTools/WriteWorkers/WriteWorkerMonitoring.h new file mode 100644 index 0000000..59c7b07 --- /dev/null +++ b/UserTools/WriteWorkers/WriteWorkerMonitoring.h @@ -0,0 +1,28 @@ +#ifndef WriteWorkerMonitoring_H +#define WriteWorkerMonitoring_H + +#include "MonitoringVariables.h" + +class WriteWorkerMonitoring : public MonitoringVariables { + public: + WriteWorkerMonitoring(){}; + ~WriteWorkerMonitoring(){}; + + std::atomic jobs_failed; + std::atomic jobs_completed; + std::atomic msgs_processed; // each job concatenates a batch of messages; this sums all batches + std::atomic thread_crashes; // restarts of tool worker thread (main thread found reader thread 'running' was false) + + std::string toJSON(){ + + std::string s="{\"jobs_failed\":"+std::to_string(jobs_failed.load()) + +",\"jobs_completed\":"+std::to_string(jobs_completed.load()) + +",\"msgs_processed\":"+std::to_string(msgs_processed.load()) + +",\"thread_crashes\":"+std::to_string(thread_crashes.load()) + +"}"; + + return s; + } +}; + +#endif diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index aafbc1c..6fa4d40 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -5,15 +5,21 @@ WriteWorkers::WriteWorkers():Tool(){} bool WriteWorkers::Initialise(std::string configfile, DataModel &data){ - if(configfile!="") m_variables.Initialise(configfile); + InitialiseTool(data); + m_configfile = configfile; + 
InitialiseConfiguration(configfile); //m_variables.Print(); - m_data= &data; - m_log= m_data->Log; - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + ExportConfiguration(); + + // monitoring struct to encapsulate tracking info + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.emplace(m_tool_name, &monitoring_vars); + thread_args.m_data = m_data; + thread_args.monitoring_vars = &monitoring_vars; m_data->utils.CreateThread("write_job_distributor", &Thread, &thread_args); m_data->num_threads++; @@ -28,8 +34,8 @@ bool WriteWorkers::Execute(){ if(!thread_args.running){ Log(m_tool_name+" Execute found thread not running!",v_error); Finalise(); - Initialise(); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? - ++m_data->write_job_distributor_thread_crashes; + Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? + ++(monitoring_vars.thread_crashes); } return true; @@ -41,16 +47,20 @@ bool WriteWorkers::Finalise(){ // signal job distributor thread to stop Log(m_tool_name+": Joining job distributor thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); m_data->num_threads--; + std::unique_lock locker(m_data->monitoring_variables_mtx); + m_data->monitoring_variables.erase(m_tool_name); + + Log(m_tool_name+": Finished",v_warning); return true; } void WriteWorkers::Thread(Thread_args* args){ - WriteJobDistributor_args* m_args = reinterpret_cast(args); + WriteJobDistributor_args* m_args = dynamic_cast(args); + m_args->local_msg_queue.clear(); // grab a batch of write queries std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); @@ -63,13 +73,14 @@ void WriteWorkers::Thread(Thread_args* args){ for(int i=0; ilocal_msg_queue.size(); ++i){ // add a new Job to the job queue to process this data - Job* the_job = m_args->m_data->job_pool.GetNew(&m_args->m_data->job_pool, 
"write_worker"); + Job* the_job = m_args->m_data->job_pool.GetNew("write_worker"); + the_job->out_pool = &m_args->m_data->job_pool; if(the_job->data == nullptr){ // on first creation of the job, make it a JobStruct to encapsulate its data // N.B. Pool::GetNew will only invoke the constructor if this is a new instance, // (not if it's been used before and then returned to the pool) // so don't pass job-specific variables to the constructor - the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data); + the_job->data = m_args->job_struct_pool.GetNew(&m_args->job_struct_pool, m_args->m_data, m_args->monitoring_vars); } else { // this should never happen as jobs should return their args to the pool std::cerr<<"WriteWorker Job with non-null data pointer!"<(the_job->data); + WriteJobStruct* job_data = static_cast(the_job->data); job_data->local_msg_queue = m_args->local_msg_queue[i]; + job_data->m_job_name = "write_worker"; the_job->func = WriteMessageJob; the_job->fail_func = WriteMessageFail; - /*ok =*/ m_args->m_data->job_queue.AddJob(the_job); // just checks if you've defined func and first_vals = true; + m_args->m_data->job_queue.AddJob(the_job); } - return true; + return; } // ««-------------- ≪ °◇◆◇° ≫ --------------»» @@ -97,6 +109,7 @@ void WriteWorkers::WriteMessageFail(void*& arg){ // safety check in case the job somehow fails after returning its args to the pool if(arg==nullptr){ + std::cerr<<"multicast worker fail with no args"<m_data->query_buffer_pool.Add(m_args->msg_buffer); << FIXME not back to the pool but reply queue - WriteJobStruct* m_args=reinterpret_cast(arg); - ++(*m_args->m_data->write_worker_job_fails); + WriteJobStruct* m_args=static_cast(arg); + std::cerr<m_job_name<<" failure"<monitoring_vars->jobs_failed); // return our job args to the pool - m_args->m_pool.Add(m_args); + m_args->m_pool->Add(m_args); m_args = nullptr; // clear the local m_args variable... 
not strictly necessary arg = nullptr; // clear the job 'data' member variable + return; } -void WriteWorkers::WriteMessageJob(void*& arg){ +bool WriteWorkers::WriteMessageJob(void*& arg){ - WriteJobStruct* m_args = reinterpret_cast(arg); + WriteJobStruct* m_args = static_cast(arg); - m_args->local_msg_queue.reset(); + m_args->local_msg_queue->reset(); // pull next query from batch for(size_t i=0; ilocal_msg_queue->queries.size(); ++i){ @@ -148,48 +163,58 @@ void WriteWorkers::WriteMessageJob(void*& arg){ // as before, such batches need to be grouped according to destination table switch(query_topic{query.topic()[2]}){ // FIXME switch letter to query_type enum class - case query_topic::alarm: // ALARM + case query_topic::alarm: // alarm insertions require no return value, // but we still need to send back an acknowledgement once the alarm is inserted - m_args->out_buffer = m_args->local_msg_queue->alarm_buffer; + m_args->out_buffer = &m_args->local_msg_queue->alarm_buffer; break; - case query_topic::dev_config: // DEVCONFIG - m_args->out_buffer = m_args->local_msg_queue->devconfig_buffer; + case query_topic::dev_config: + m_args->out_buffer = &m_args->local_msg_queue->devconfig_buffer; break; - case query_topic::run_config: // RUNCONFIG - m_args->out_buffer = m_args->local_msg_queue->runconfig_buffer; + case query_topic::run_config: + m_args->out_buffer = &m_args->local_msg_queue->runconfig_buffer; break; - case query_topic::calibration: // CALIBRATION - m_args->out_buffer = m_args->local_msg_queue->calibration_buffer; + case query_topic::calibration: + m_args->out_buffer = &m_args->local_msg_queue->calibration_buffer; break; - case query_topic::plotlyplot: // PLOTLYPLOT - m_args->out_buffer = m_args->local_msg_queue->plotlyplot_buffer; + case query_topic::plotlyplot: + m_args->out_buffer = &m_args->local_msg_queue->plotlyplot_buffer; break; - case query_topic::rootplot: // TROOTPLOT (yeah the T is just so it's unique...) 
FIXME maybe topic can just be a unique char - m_args->out_buffer = m_args->local_msg_queue->rooplot_buffer; + case query_topic::rootplot: + m_args->out_buffer = &m_args->local_msg_queue->rooplot_buffer; + break; + case query_topic::generic: + // these can't be buffered, just note their indices for the DB workers + m_args->local_msg_queue->generic_write_query_indices.push_back(i); + continue; break; default: + //std::cerr<<"unrecognised topic"<out_buffer += ", "; - m_args->out_buffer += query.msg(); + if(m_args->out_buffer->length()>1) (*m_args->out_buffer) += ", "; + (*m_args->out_buffer) += query.msg(); + + ++(m_args->monitoring_vars->msgs_processed); } // pass the batch onto the next stage of the pipeline for the DatabaseWorkers std::unique_lock locker(m_args->m_data->write_query_queue_mtx); - m_args->m_data->alarm_batch_queue.push_back(m_args->write_query_queue); + m_args->m_data->write_query_queue.push_back(m_args->local_msg_queue); locker.unlock(); - ++(*m_args->m_data->write_worker_job_successes); + std::cerr<m_job_name<<" completed"<monitoring_vars->jobs_completed); // return our job args to the pool - m_args->m_pool.Add(m_args); // return our job args to the job args struct pool + m_args->m_pool->Add(m_args); // return our job args to the job args struct pool m_args = nullptr; // clear the local m_args variable... 
not strictly necessary arg = nullptr; // clear the job 'data' member variable - return; + return true; } diff --git a/UserTools/WriteWorkers/WriteWorkers.h b/UserTools/WriteWorkers/WriteWorkers.h index 3761da0..33bd050 100644 --- a/UserTools/WriteWorkers/WriteWorkers.h +++ b/UserTools/WriteWorkers/WriteWorkers.h @@ -5,6 +5,7 @@ #include "Tool.h" #include "DataModel.h" +#include "WriteWorkerMonitoring.h" /** * \class WriteWorkers @@ -19,20 +20,25 @@ // class for things passed to multicast worker threads struct WriteJobStruct { - WriteJobStruct(Pool* pool, DataModel* data) : m_pool(pool) m_data(data){}; + WriteJobStruct(Pool* pool, DataModel* data, WriteWorkerMonitoring* mon) : m_pool(pool), m_data(data), monitoring_vars(mon){}; DataModel* m_data; + WriteWorkerMonitoring* monitoring_vars; Pool* m_pool; + std::string m_job_name; QueryBatch* local_msg_queue; + std::string* out_buffer; }; struct WriteJobDistributor_args : Thread_args { DataModel* m_data; - std::vector*> local_msg_queue; // swap with datamodel and then pass out to jobs + WriteWorkerMonitoring* monitoring_vars; + std::string m_job_name; + std::vector local_msg_queue; // swap with datamodel and then pass out to jobs // maybe we can use shared_ptr instead of a job args pool? - only useful for jobs retaining their args, // i.e. job queues of a single type of job. 
- Pool job_struct_pool(true, 1000, 100); ///< pool for job args structs // FIXME default args + Pool job_struct_pool{true, 1000, 100}; ///< pool for job args structs // FIXME default args }; @@ -47,9 +53,10 @@ class WriteWorkers: public Tool { private: static void Thread(Thread_args* args); WriteJobDistributor_args thread_args; ///< args for the child thread that makes jobs for the job queue + WriteWorkerMonitoring monitoring_vars; + static bool WriteMessageJob(void*& arg); static void WriteMessageFail(void*& arg); - static void WriteMessageJob(void*& arg); }; diff --git a/UserTools/template/MyTool.o b/UserTools/template/MyTool.o deleted file mode 100644 index 6cb5f68bb0273318df8e09f4185bbf311d2df30e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19720 zcmd5@dvx4Yo&P11(1p8HeDk>_h!O~WItmvLyaoKLWENW-N+HyebD$xCW?qhQ2 zH*c+jI6750A9m(y8g(4Eb*?Ont)Gf|r|R9wnYwP*`;$9;M}zBiKJ9v`BksY@qa-=ddE5~NcY1OM z>h8fc6U1F_&8U)cKr}d%;(CGWZ*03^U(|adPb#gi?b@wwaO4MVo7y%~r}wHmd2kde zUv%$$ekbVAx@z(2nu^$TCfpQU(0Np-jO8wm<@#!3UM3uhc_(5OM>6kNKXo#g-5GG4 znAacfjZVH241W5b&^weWf>XOtY72gA-p=-LQ_MR~ng)0JhVba>163i%oo)|z#d4pl zalNW=ESH42?cr88*Rk0>*dBIiFgtMoROnxO7@Dcn^==6FD$VCA&Gl0Adn&c&iPKDgX{sM^bGL_U-07ZhYs~wdFhjZ%aBrLUgxhjBQ!S8=>~op17~ocSx}zbM z>kqp^vtfxc;>*xV*~*BMh3@a4>wdBRQq^;M-RaxIA$NKOyDtoxnh8#QkG>GLTz*AZ z8OVAl*765k^^VD&-1)>#P>T@b;J4k_c2iq?UvOc&0E;v;$%srkTdF|xM;KCiC zmH3OWmR_RA@=u|0EhbaUVJaS z7Z@G6BwC&eWvKb4)X;Prn%3vWQF%F-eV?dYjmne3?Bk$|=C;?zyjZyY=qB_w zmO}|pYjnD;E1LUgo!fFSbGnLI*%U%k6XETW4Kl>stCv2VdL}a*^?o(`cQNm`8VOFd zK&_kmB-e^AH+Q2DE7uXA41G!rX3qtgWYSSM^-M5(CYilM`SV2tVV=gkpJ~zKe;Lhv z3f8WLwKo}Jfx^(W!qAW)#@K15%z07oKk~oFfNmA>WHgQcItnwRFf&?YCQ!%|W@fZJ zHBr!G$UBt3&yZJWqCBjTQLg3D+zl9Ncuw}54UmbILG=A6U{{9DIWBt;Xw>`n{H4fU z!zoja=r7z`IlZPHuS&MbSW#mr5W=ta~;B1O#l_;V#7A* z3Q?X}B>Jic6B9Obe$4yz>|=BLiP1LxoSVC?#+}?(&n>W)MyjI<=7Bi8i0P-!M^W#U zjp}$L?2S%m!#hY&FeHj)L0G)G#A55b%q%Rjm{D{t=Hu*R`n;s=Ma%ea7}hlmYgnv9 
z{dz10+H$M|=upe6Y8Dih39$7V+dQhR?i=ssnoBMSvp?jnFU0(#uzB{|Cr?&XHCFdJ z%oo5-$lYC))%Tv^e7~~#vu8M;tE&E1(0QopCly}{I?tX~{n7$w&pWGMS>U`nzxvSy z&X3+z{rCdsziNr^4zhfIkofVSlyzwY`E+IV{u*azp!%CN&MyPQj`OqX>W6Balhu1G z9;A{hkSFMV#kN2fUP#f>d zghs}PhC(B$OlVtTXt1BeNXCA0=ko(%V}MYsi_;H>yToEC4srK~p91JkyEx1JLB%v@ zYCTW4Pr6w2N5ug$8-9S?;#B*?3KucMI?u5K-1u?s%!YRZaSvw0_aSErE{TP^WLV@b zC7|R-UM5uoY+~9PJT~Lz)^kO{9=EYRxF8#DroE6zLnAX=QECFh7H(*6uI;Gn?UI&z z_f&})G#<0I9f&5fvTRud9=@lTTvBK*vz3JKoPwZ-NXu*nn{Hu zdiGtxB2UP0mFg`|4%1?!VyY=O&>EYbXo_JfZj<|<;KTuTj%#Csn;Q@5`gb|3G4Ev@ z3vp#qf;YM#L4>JIOYc$M{#eV2%zFikI<9Lmv-hfVwTc;Ouy^>y;HP>JlFB^vUqs|n z6~QHdl{V$p=?B9zu!Vhp1UVCfe=l;^dr;(X-VAYZdRklsf}9+m^*m(d(Un-sOz_i3 zP;hg*!Zq+(7IDzKft22(%D#KTGvqLAKQIqAsOFYq`ol zG42S7c!Pz##d5eZc8Q4y8um~1Q3O2$9_<4%{B`7usT0F~0gfXySxk;yRkF|*_vH8Jm~x=rIayA!^Qd3%H} z<+b)944u3RU4RmAIMABvx^PmsK2{R|@Hm zhIgX7(aDpzuAh`8T-Ohw1nE^_NS{I3g;^L>2B4ek3F8?3GPE?Kf`gsvV(FkhD#xsF zk>GJ=bj8sxdK0$Uu53b9u^Ma?NA+}VZdHU5*Ng*Xyn+=0E&5k zjCsEi-H)Mbbfk-!N9z1fg)ZSBasPW+dI_9Qk&L%ZEmsbpxO2)2H6uf)1PQ70iy=&| zTW;6WQqWZvga{;?u+m8-Aex=DN4yGMBJK-JkWLyk`V<2Vln7q()3eOOE~-OKzR z|AdBb$$ss`iAt$avSzxfeRiTE?t2ZtQMIwzW8k<5(o0cwbx-8NXYS@tM54nkqYPfXi632Ou zGQ8pw8=Q)Xx{CAW&#T#uQL6)fzG^#7&dF{2mfqMhB%|O~}8$ba{aICJa0qb%+w<}o>F^|^P?W%06t-mYK zQ5%}9s@!#YZGD@dzUrNTWzlgOK>BbAU;7;2?%Mixr9|6b@rkaO=-s?8kn=p}Y!@nkjQL>5$ICBX~%7S6~;GZKn&*1RkdFJ5>AY`>C_`f$G37 zt4}ZIskPKenLnXGr1elJbPAcBWqj|__gB@-2ZL}|<#l5GSS^>h4OclY6wN_R7jRD3#iBgm%rC0z zO2B!SE*9khhhHd)6T4Dr;37U1NkBw%F+FPm@nxeNu4hGYTR@C>F}4TQ%EBy?F`+2} z&e^(Hln2BqzX%eQfcUyn4nMC1d;|#fG6tDAhB^fbGgl*+=WtM^fs64(b0hEy!_V}i zP66kAx>%G4oKR6+R|3u=T`bB2&ILtvT?rIspGIUJaQMZjGzG-$FNgC>QfUe}V(Bkw z2sn);*oQ>NOZ~iB;G7A?sZ-#k;~P`>ykc_aBMQfoP?R<)T+BbC<$8sy=~rm!6}b2* zV7d@+F44uJJb)1{mjZ~0aw&kBQ7#4KDpArDaKwqf46b*M;--M(_l|52INTXZQ^4_i z&2dub(iz5jqBUhL_~(Jwm5O%uTJUcJUufG;xQvsuafPYeBVf%6NhOj-^< zh0%U(6ZapcS`5je%DH&f9=RB}rj?ZPx0Ce7ywLWXX`%17;9G!~v*+W$7aQvm_cW#l zEOHK8@c9U-a`rE@;5S+DtObA4g8!QZKMNDCoIU*({2mK_$b$dQf>&Uwl+(M(f`8tE 
zf7ycn)`C}If|S$S4xHn@LyW3CU$FQE3;hcgd>y8AIr(>6@S_%-OF%g}^~4cd{`7te z{ZinIoKS&Wuhi=WzNmm}T>}<5cMJLp3iPOo^m&1Y3%K4P{?;PrXTU=RYNHUp?&|JL zb9}>=cqW}pMp3ye9`Bo&h(sbw&6mT`zV1vU(l?MugD^3e8H@IHbSLoB(b1h*KAIRz zN7r?)h~ppXk*0WeawyrCiHv2U-OYrS4<*OOqM|t=ppIs|ZBC{WW66##a4gd%#79#7 zNkP##IF?Q{HaRd+U3b!Ec>$?l9@ zv$`!7kB^OSiAUq|rq$RtIts1bnOJi?KA0MWPNy-p<}&`Q5A@5DxF*-#rAB{ zSJL>#ky{5x`r{jtW8=d~qsxl2tf7&Xj)~D!ISVNGh(vYj7fZ` zNG66~Co+X&5L_z?T%g}FzO@`n2Q9@^7)*`DL7o&2fn$5jm^vl(EmAuKzmDEFAX?6V z6BDIOUm|g3{K}zJUt(x(NKDh``0%O6;sZ6I;R!fxWjvme9*z%=j3v{Vves|l+?5?v z)KLU$6Q=yic-Ok_W$_^?t+2#ESYJ9hoE*t;+Rwq9_QLUU9Ny|Gge7NvJe?dJO7tZ= zhKIY>^$klw$|pMdjS*H;#~+U}te15sGpDqipx>v~gBE^$LqKCq*f2co3@3;CMz^23 zsKnUV;LRf)8`gC%r$O*_OFW(#NT+U%52bF#N55n`ol3(`J{4~%#I;w|-2eYvi)mKb z1!r03DUQqrH8f?VMw+g|YS)!WBhpW4+-dJRW2JhNyXR$nso~K?8k?XT35#)WbB~i_ zTv*$dQ=DFojTs?uIi~Ucu~d8@G15PTB^(EWL>z%RflUufVz+-_)LTk4MeR-4p_;_e zBC%y~TO`5>K7=5RXSRO0z($VpBGQMpvof%K) zH>CXgQ6}DO_y>G=?Jm-l1d(uDcKAVD;~yo6MEc9|X#DF0kx1W#N8@+nKE*`52am=d zFz{st{tk7PB>hGM=br#F5jW|>3TMW*B3k|ugMO2Nw;8x8r^|wGHgNhx%bzjus}1~+ zfnQ_bUAXTs(f(`kXgPfbZrZcMz)ksIG;mY?tbv>KHB^Yi%%uN-ft&QMft&OoqFIn(2jQME#neT1N^=qjGUuVHTX2EZ_;8_d)ISc+B3;vV^f7*f{ zv*5MHx?%b!Y{4(F;Hxe877M=Jg5PPu@3Y_vF4?~>l*3yR|N1=~-GNsog>`^yAd`Q- z^a`{TAF$wA3;rbwZnOU)3%&T`G_?8c=bp>tw||R-OYo0baGU*Kw9tRmf}d`|_4-OX z`NdbS!;38R5ewdG!Sy=rm(M+y$?qTAIO;D=Jm<9H(RN;jG86GOJR0{1B2oToJR09k z5Q+3_@M!#Af=I;M@o4;A#yZwv;H?&1ug`uvKdb2d_&!6FnoQwtkv<1J{f`7@tqlVsZD4f{^2L3&R z-t5DO_Y4vu`SFVt?w1oW=-a@j z$E(wzUuWQ7G3eV3{09bo%)q~b_ufowUXMr1|F(gf{d&@ZAF|+|;v$Yj`Q|xxkAa)# z*dH6XNzeD^Oq65NUo=m^C3wGqoA;ev3g%;O2UJLgD_n<@aFCV z9LaCbd<#xGCi{_Bx0NKX#Cv-k!UB+>>Af!*eT~a1LvB+L^;>v(e%WaD)8|C zX8IHHoAA`B2Gw0be}kVGbqv0N$b^q(fd2`(*=e6(SI&ZT!pr1Xfx!HUhC$ciZYSD zR+QvV!oW$d^*(6e%w>EuwN5iig1!Kfc!nw-WhV`C0Jv?#Pues|E{zy2&g!4+GpUdxI1=jB`Y zZOLdE9~m0FB{{U+X}q})FPyK{i*bra0j9T4DgEejjAb6zb7p( z;B{MV-wn%(DtzPfwTjP9jj#9h5MNe@`OU{^#Q!KtHsbF?FNLT2Zc%8bf z)45H*c8#f@`<~|0xsCsJ&@!3&xzB1oo!j`cg4-$l#U$2|L@E2b3fC3Iu8NkmZ6TzD9JRV@}Fu#Rj*UI&iI#nBx%)v%I~{a7JAja 
zhW_LJqQ^s@A1(w&{fZ1FnWoK6T!YBf_EtbHL5VVL{C;|Woo5kZr>XrBMUi$Z{pR1d zvccbfo^eF~ef%db6=EE-|J>L8{eKQvIsL8b(4ha0O-d`(e;5tr^lw&&uNlQi|I>EA z{?~w&(|`O;>gRjxa{B9+q5?~45T`Mz7|S9Ci28wLx~h|CJVJ~un>2D8v# zppb>*O8<<)*?+zR^6S4y>G#W>&p!x9`YV|gerc4#NDgFNb zSDi_2X`3v`U)su~^8g&rvVZ-lQ~VbR>bW1k{jY(!-1rZu!~29{rTypQ@!NkE^mAVN z*NDTqUEcQ33 z7wfJfr2Sz$e*3>_(Z5saZ`O)buK6{*$D)5!>F-s9)X%+6^Xq>7t406Jo7Devi~gsS z{@sTDhBEqJvgkkYCdZH8p~}TiFApn7x}WSn*D`Vo>gB5-cqAP>_L z#V>1fzaP{W+&M^;G@ph)toYH5qSU1L`CsYHr0v&O3;4^;{|VLq9ZDhl*N8{+YrnAB wr$UGxPz0{phq;|ohM%9!Frg&vT_mArHG`t?_n&u%a{MQ*kPO%O8vXqL2ZQcGT>t<8 diff --git a/UserTools/template/MyToolDynamicMultiThread.o b/UserTools/template/MyToolDynamicMultiThread.o deleted file mode 100644 index c20a8812782bc756045567f0e6a8a42ea2572177..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46912 zcmdUY4}4VBmH$g7fdHC`|D&RgIBKGhW{`j3k4+#$-pB;A1S?&oLkJTR&A%oS{wSDW z5@b4zQnkCb;ubC4+Sc8&)hbogD1U;rRdlOGt%_a$Fd~Xt5&uy3d+wi^GjHaR;M(2a z?|yiB?|t97=bU@)x%ZxX-+e=;*FQ5eBg4=s!?@H)+-Vf&yA#)?;B%hlP7^GtPJfwQTueK=pHu4m!wSJ#y| z&sNt}IR66#58W8fbBrs@@L|a${K0%dlKi}@bF$Gf{P9Jg z7Ei0q$dx9PDcekXnNiZ^c;e+bDA+tc9M|G%$BfCwRa?zyMZVczkw+wExFW~XR~N3B zq%_`9k#7iX`zuUIFckDvpex-`QDtmhSGP`4md(AwjJAlP2~$Kn3qZBo47cn7cPX#p zfD~*-ySu=x&iO%~**{~MWIUr)%M=fVSmz1dci^cRc9{L$_Yxh_RrAZP-s%f)leSn_ zb_GO-Kauu-FW-#FERowbfYyvWzK%|XPnu7B?l4PUaCYw$aMoTk8gl3Oqwf8 zj!+W$HwcPHq~s^f!q4M**W!skGWvtc-uJQZYnUZFLMNJq2h8x3D(n3l2=0DA#|*2>ZTVFAviV!#7e{vO zyW0%EZ$_yR7@j_n3H2fDp{$W(Ms5=m|5kk$#ZA268$S}j7 znBkpf;V$!uLon8kw*NFso(l~%!@`#S$S=v3RQ7qkNK>APiwZN+k)J5+N|zb+7W*T? 
zJo5dsq!eX}%HB6aAbo4!@9UiBpdp$Gykl>iMSnNRpk#J$%h^mFM7+qdNt9h#_N{ms zA74O}tKKWfq;iOc2zM>5Go8(S>b}m|vJ=NHa%13=NA1i|Pn?B*cX1rvmEkNr*{xLW zH^bY-z?(HN$h=dH#$eLIHmSwHEOf_@fWF zH-N%(&==_hk1gVk8b-c9<5P5NN?k9UsWIBX1CBgAwP=J&M8`ROKO3MA4*{wfvztmZF z5>rH9fcr_yP2R zqLP3K1eEkSZyF<5uXXRl6VLP1ct7wJ?o_(@!8dXWo`5aPpjBQ0C8kVuCl5ZTrA6fK z-Bcblfi2PqqDD5lHwYd{H9|F^J}nU7#aFvPoq!PL#PSA z7S*l#o7i^o5EVVwU2jH3H!oPl=6-qPi065;`2_ruEUW3xJ8y(_>)fwGzSB1>}2-h=4HBSTC_>t284 zd`LBX(G_aIdw2+Gp}qbc!6}KYWkl6O>ATL=XF}NQ$kc))a?;{p)&N>0#QPAf_##V7 z{oypCQyAYhc*YMyxzuu@@u82J9oga{pQ!9T;_Rsc&$6!0N$9)t=+gPv(ivJCGu+P& zqD^`O!nl5EbbIR@#z1fxtfNMZvd!>Q^sXX2>+UZVe9cHi`hpl?&BEV_P$T>&vjpbd zA{b;9?kJs{(cirZxhZ)Ll8bnrkNbDzaGqUe$>(i*dJj5# z&I70Jzhx{Lj(8zH_+PuAJW^Yau0Pp>CWBUix}#m0(_fJTA4K2UZyQq@^*6Vg2+{=?)EfQx~z^wu5jQua<%@7K0H+&eS|Yhjtxb> zuiJwTB`G|AjOIFM@Wtj7>J6aztRo^?KeEgD*n!xkxaq$s-mv!TwjrG*_B)IRb?f|L z87KK7LsQxP@e5*iM6&D0jp{FC14)U_o#&JqWmi&k`lK&>1VwC3#IheFjHNC@#HiDA zGMpz=>{adTVt=F-jZ}p#h$ixB%-us}3-9%3>7K9%B@jcykz%Gdp+(|MCC zPh4f7M}{Sw4|_3vU^AoIo4um zA^j!^YU&4`&Lt(c$UqhGR$e^b+9LA-%U2j`XA;?N7uhvhQT*GMc4Q8(7luX!5xykY zwy-X|*8N9ZqlR9@DM*#K6Bjh&1okJKkS)tCDr?0sDtsei%JBBe@O%55B7UiiEOO`5 zpncxdDFu=$lTxe8CntL@BIk(y?x7`!6c6|Z%5Ne5p9Uf&hX1n zNlq2%kxAl4qTu{pn(8 zApCv}a-%(#Qn-wGKIER~4+jgByL*Aar@*QR*)s%e z&61Pd)D^&nr{VT-YLaH;IJZCi7fcWKQdVDg1tJ3o%lAi`Ox=sXGNu0T9)HGOYPN4W zd)`2HfA~JtlN?4*QU~Wmt)nLyjVq{~d!i3Si0o+9Iw>yK{$_8rt(j7%hDRvfK?JgC?P(iGi56MXa%}49P zx4XO0gOZ=-(TsQvZhR3xb(iJtd1CHB-b;_}z#}R?3BgFQmm0=0V3c&pX%g!qCGd1=-S~IrEnO*36@zv z8PJ+N184H=nA{q? 
zTe%gL3|-BnZba3ySTa$Lh82J=DleX`p)+JsohrY`qHE!-aGzx9C3K4Od2=4^pbul+9~-oKi99%%k}F%d*(l~QhMnNLy6yceLb`c7EguNt8kz_trM zzVNTee-R1MS{DiQ_1{GPKuO6Ky~GD)GTs7(XjbAGU8AIXaK@NKeF``=s&P~-G*MMi zGs_mwuBySNR~x~mC`i%HF$1m@??>CmPC`+{thpLC8^*PP3xw=fv$DG6P>}D{*1WWQIl*% z*;E99roVRZOaIbA(?@ELUA2hGl*KF!jQi=%931ND&641z3#SrSZ9sUR@7wH%qb*Sx{j_zWDS} zYO?Rvx^*<5^}0upiQ$5=p`XywPt1zE9(&@bA zLK3vX6LHdKp(hRMItYEf!tn23azz^iD`CnL%zfmVocg(zV;u4E)E^<z`0XG$8gtG{RH_tkVQ{3RoSIdlm((qUy;1h&V@>WUiiE z7QkL1HlvholROWj8e=m$)axE9ESs5`tG3ZBelrO9s&wL&F0D^{(Xw>^A^NBD$i>uL z7^N38;n)`kX#IhDd+8_-h~iObBtkxy>>G8}0YtU;kvgin(bCb63M#4=@dzZuEcRpK zE&9O(e_0sfza!3ikKiX*3njps252^=AlBj$SQB;Zryq!Zgn~@E*B+rgBx$~V810d~ zq7Q`9EB?))y@LGXE6ul^J-(c=~|$ZL{~O(DCOoT{-lnHbN+f>SA`{w587@_jN5qBCG`jCHZ|@ksI$c5fCM(fIrP zhorW)yIzM$~o34auR`WCSS|I*mAqLsmpYP_at`=Uc?cu&+v5y&u> z$9#B{s9zTur4PcGF@*7w{km9akz@?YMEwj=$G&_I@rZEcuZQ1Aif`ieUlmu*>q@G7 z>@Y~~*@Ws5uPgTO`xHgteRc_ZT}f}-_e)34gf;N?y1*Z~rodkkqtMMc^EoV_gwCdH zKH++p-B+oGgR9=YaDT@rY>V(5@fYqjr^%4$rzB_^Y=MaAc3g-1pZMUM*e$qnJ}4`E z4PLmZhN;?JtQ*Bcj1>D!>=^kpb~n!IO{0`2w{j@iC_(IG3OhLpZ=%A23q^*m*y(7Q zNd#y&rLigS8mgB3*eej3^gM|f6+kv|QtK9U0W#Ow#1;ATViyo*lZU*S7$jbgCcg&# zsNdRd|NW>x6DSrsl{$)V5)zbY%@ z%&%yReUL)MTzOd!O9>UV({?B-n|AD79B~rPRwrN-=H3i%trk zPZy|o>nT>j@Cpv-CJe3Ajo{VPGAybhw+RIEwtUgAe@HGC3DT;sv*%U_rxi6?e0BC* zk4Kf*^HWK$70(tovvKp3v*!uW`6A2lRsjocd%p$G_0!TDpi*D7tjZVpw%8OjT)4M> zgaw$gVlvNinT-l6d{8!m-d93RzVJ)?eionk^}gclErD1x*HV}k^Q8HLtz6F#Z67<3 zz|q-r2FN5EJzYfRo>M4u_|@1*C@A`2fB0D~tm8Ic}f6X9`7S)P?q< zGlkCdgc|4G1|2?t`1-apR`hohwe~yj8S#Q5G$)fw{0d_6oRRS0XlK6I_=oO;q zXDE)6Gctxw@r7THZAWjTUy=dr*e5&-YoeuQWL_THL{&mfKlH~^NP_3c6rQJ1@S78x zZ{Zpyd4%m-)Tv@~>3+ zz44w&Zs~;&>Ds>iG*dW)Wq>4>Dx!}4K(o!g^%r9fi!7rZi{IMD)CZ zM{Ep*H>v=-$m^17SKc6i>Y{EN)9g}b?>j=XL9DWW-3#n|Z5Z~OM`bG3me~-ujG9uB zw&`EtIz_4Wcs6*g$eBw5_171FgtJttO=*1EeWCme%x)2}JSb$VlkUrn)B?0Qu^ zLsc<1nkurhhvEsLOU%!lhGrt7AB83xNE6G;lr;idn`RMgEnfs1Trpzfq-1oUQj=R% zm@VxX8!rY?yh=>_c81v__82l6M~-A<6=g3G=M&KR<9^0>aBj%HXP6Pmz}3^4+20#x 
ze3qGg<1pietnANoj9Avmc*)*7B>P9hjP*mZZyjbll0(;j%mMlSoNPP-_6F3SE~@L;T&)KIYQ)hUn<}K!hl2(^b0Bw&zC_vB61O*ck1|D!wB&awihZm zFCMn>%2%iXZg;F^V6aj>l3mes}gvMh2N(54^-?yyzKTjjaV6u66Ha#9xKGI?!kiR^K>gM#2m zEC}X}Fj#}BZ*8EVxjonx!ur*fKJ0gW7>1(cjBkkReh+(87Gs7w(T}Yu_%23#ld=n2 zUEs@T4(dJPVIMwPae8-QBSFS4GyIYB)+hXhADNhu_WlSiL+z%Q-spK=@t`J#Ru;bJ zkFGc^hhA-C6+P>;S<$Qu#T%+VyxOKo^Yfi!5GQwuj5}U!v{)x4t6XeW>W;S z*U%mK!&zc68eLVE8w#B3dA_&L?=(O1JAF@*!SOZ(0sE8UvnOHp%EAwPt4K&^=OSD{ z@varM<>5p7wyXI!eNu(+B*$OUnjiA$8ljC)PyszBBIJ4(%IS|ZfbeuGBT9;5#%jQI z9f-W1gZsL5{o$LRSUVm7j9rU!*_CFrA_qRJJQeFc)!5dL3C0d*&*db$(k#VwCp~*=8D)}Q6 z3GbCfrQy~0Ug#2j4R_e4(R~9xHFSEn`kdu(jF)2zVQcYzqr0=faQ4Vm_<##bx2UzT z*b$2_OfZALo~#SOv<)saOmxjsXySeQN+DFfZy#Dt@7tF*QMn<-)5kg~=|3o^E3_x2 zoUJHlE6TaOvgB=udr_5hYP_7Ms%~l;^zQ6DPF2;psH(H1AO8*Ro{**{*rh1C*?oP! z;d6Rl#j%}=z8FIRbe0L9RmwdSdccWHqX*d0Hy>Xg(xqTU7b4B}Tr#(%rEz9kO;d1b zOWU;`&s?#l7^rDm-0sy>iL;e{9OX_!r*n)gsa#GZ<;$CEni^^=I~qd`OoAq}J&zq{ zm)|za|CbNWyy}vYCa!tKj2V+%1r@WeD0F!|MdOQ#U1N*Kjw_z%DRC9d2`+M(H6it2 z%-BN1z)ob5GOn9rWGu_eIPpY{F2a$pgB61`<@Jla^lUw0NTu}WA1H6L zcxK|38Y-ntq}=q}yjwD-=jPw+n4as3W=+p6SespzTfBNmS?;9nq2;*?9RKXdodl#T zw*VBNM2_i#*EnR)LU}2P@y?fGmZH-ELmo9pV%%G@|SHy?U|`ZDOOF6Fo7Q&qk{ zP-c;@JdtlxZhpBELS+$KYjB=Qw89Q8@pASkKB74Y`z;gC$le>&GtnAxuj?e^x+=e@ zv7vVo$1`=kQT@0HLAn7)7F{IDi)4?2JRDqYSuV28p z!oJYsDE@bO?w%~imyWa?h5cRNNmDFJAL9@_k*Ry=>LF{hqggjQZpoxF+@~7i2yEjR zz~@mLj*4uEoSEYEz7fRhkm9uHQoRW1}ZjKySQVGCax z+m_3=Se5y3T1%9V$b2`gz7-=fufzk?tEKh&$akqePF7`Eej2Zj*=ckT{lR6q2eKVI zoN0y4BAzZB^@`^^D3h#@1KGSjs2xtt#vh~$>5u5U{!z4By0uI68CI6@VPfEAgbvaA z7@#g4Vhd$D{Pa{1h{HI;20zmVKPv?%x5$J(3)B*yjZB7% z{#ubZ#IJWG)6<e>l#R$e(p`7!&nn{7m!ROoj{J#?!ZjJ_&zQ>_k3?Q4-J3FC4}cejC4b$WbtU zuP+?$vIQkvR8AT^Rj%ZdDp%rma@9a)k>E)221(#Z@dl0O zSU70h>LnVd4`)-8!=O<(H8~7?=aVKmjN{XwWH)?7mL@q2XBw2~tzIk190opAOOqUO z){^qXVOYIfk~reCt>i2YLrk!fz~7;2N3n5S*XUaUPmJSwL>UHxblRT-{9M+Lxs;UY zHuOH=qr@+6NkYxPN#Kb%VS?iGec&g?M=v$c!?Vr6&ovVLp-a*K*@oT$*Caj*6rGMY zE>d`x!b{b;1$eshF0sK|ZSco|WABsVs^);Wdjt5nMxx)QIgm(bD(4!B{$JztBNgfR 
zOcS_U=QjSJt^xwTAdZW9A@0@#PgkzpHh4BV>~!>}0w2ZVCC$HD;E8z2t29^#ob2DH z?)7~0AshTPh3l?~=9(f6g?%!s^qrx5gpt70={eK}FR{T*;KJWjm#F1lqwp?;i+L)v zxWk6e3pV(xHaHbz6pQ!7To(K{11EjfsjgVeOM$;3=ymHc3^DHnz7SnuIy*Gj;4LtAI0V!VhsTHJZM9I0ft_(e=(j@qB%RIb->ffecJ{v1#vq1ZX29F z#7{@>!qA=$ud~6wYlGivgTG^gUyK1Ro!nj<{2m+p0UP{D8~k}2{9PM76QN9=>f2;Z z$-hp*IiRjb0za3{`}9h|HwCUop+w$=Hhk6y`ow%txry-;fhXpJT8GC3UKZELrLO)W z@ab_pUtJBs!ne*Fe~62306#arqAh=Wl%CHPxE?L#N3Dc60e8`bH3j~0_5klN%8R{A zitvuQrV;x%YT6Cts18jE%vl%+VUM;CHxmPa+GWc;9?w|59mH2#9rAc;>ucITSkn+{ z_tko=DX$6DRHCGfO8E-^<254av`DYZVRibo9wSY%j;HWfc zXu?WIf2UY3)P-ULwA;b3Qd$kbiFyFR&Gu#G1l3Ba1~67fdt)$2{`eJ|0J;clTEhn- zHMK!+Q&ZKf+NPr`l`xd8-e8bg!RnpwQ9F@xZ8=-k+GtY4v-!oBkHGFHH@3J|w#53=b*(Gq7<_;0_AoOW&X=|!!3^argn!r7L zRUoUr1YF8nFP|Hr%BXE!4uzYSQh$XSDb_X87;Ih~sz*rE*3yRBIC`P1@d&8Vb+MK< ziI11qgNqh#KV+&#KdGUmJ%D^c)vD&s{U33pKBq*g0mkX1E4c#@jiWF{1bJyXFjNp( zD&AwM)`i|~LLks03$FH4rqy5$#g0-*MeW0QUkp#25U84knrjs7TT0*qa$Baa$?Kg{ z-Ga(IYO_oZ7`Y(_8y8p2T$0+Hy$FY8f#bEMMz*gYf(ljB-rlgd*$aOjrx?cr_+#=R zJyh6-+7rFYg0&qXYKN`pgzznApb59j0vKzqwLqb^I0ss+u-zKL1MGi8Go@ zy#E+vM>nyyrKz>14fA+0>Y>g`v@dB#twC(n7F>u$hvwSgLK{D)3`i%mE!mpCYZXkXr3>#eG764R9GhBh=g>LRQaoCxf6oS5DNs=uNK zCT0go1i%&sFK}gW6NTEQj`da5NFi{76unLG@~~S z)z%xd+EfGQ5ajAE2zOCWMMSIPnsK!3qV0-EHwl*mSYT0mOQ0V9))-XGP~v4+CP9Ez zZLKUzqzw|^z$92gK+K0FvYLerOFRfJ1GSA9gafox0e`P+p@1e;R(u|sDkJfzdVr9Y zIQs@cldL1fsO=5u)wJ&kr7YVSv9fM!M_Vwkq@gX;QPYTb*)m;((*hj$d|V_Kt|`&$ zqN{Oe{6Yeei2f=Z8V?ePMD#T{H2zZpk!WeS7>CC1BM^z`DX+#KBoK-4Ivg5*i9jT) zo(}asg75|$n*JLKCq7FUUdia|8U8e*znM{hyq)16DV)qPmf>GAdhS0{NO2@Ot;V7CoUU-v|0ag}7`}$#TNs}(!{4>R zUD+T)vf5{c4IWZBvHKnlt>^WOp4)Q=qv!fOuW+)@C5+F@j6TBf7{k{xdx$_v#{fyyU zZuW5~Bof(yc2{dV9ItS)bA;h%Fr4dC!f>w7Y=(1v>KV@UX=nIaCbx^>TyB)%T<$## z=W@3&oXg#*aBDl=#pt;mGO!Y{a3A*NP$GTq!lCuwW`qA1!*68tOHafN67l&l4$bH14F4&^N1cQlB%=RU9GbpR z;p8Vgjw)t&Gox>1d{#4jDWkuM;WsjT4Z{yHK4FGmjD0PXNKcO6%kb~vnb!YT3Mc)W z7~XjbZjgx2&v0n^wG8L{H!z&n;{!JMlMKI?@gH$2ZjeYW=W_wWTkuTl^REoQnc+y3&&-n~F9gLBvole4`?U}1^lFR$~(;0mUqjxcUGQ+3Z@bTN=0UNx<24BkX 
z4NUGjg;TkvF#HZi&&%}$!+C!)^bBN1B0F%rQsHE0-rm2>aNgdR+wghF20#5wk}B(E zD$crI#wgrcFSjy0$mmZQNnBFfQMC=;%y8bm4lXpS=v{{BvE%j6{4m|7k8l zCC=$?&_zsm-Hjp4T-tG4rya}#{Hf6iw(m)p&7uIK#>=kzZzoYVi)sPuCC7|!{; z&2Y{q@4N&bPCtR+oPIvTIsNqv=k)6t&g=1KHu#GS=k_dfOFc7jnubH$f2P8z-v5o^ zl{WNqZ0Kuk=<99h@w?$7S@j9q&~LP%|G5qQCL8+S*wEt#;?wH$gbn?hHuNzY`U5uf zpV-jPlIn=|#p|8=WJ*~$IKEb(5?_RCUBB05$OmL+3Pm;kwpvHh_$Wzcyl=tBDg2-X ze?Z}Xv*6!X_z<=Jqxt_?;rau0jjJ%#IMqV`iK0Ku>_+<2&tg%^K3|?SpL~TMZ^1_r z0n#ZJyjP_{EC8(t;N$`YSB>oeH0C z!FMY>Xu-cwc#{SHrNTQb_(cwBx0M$BWu<4Y1$Qd?s0H7s@E=(42NZsn1^=bu|5FRT zRnert|{uO}p&g1>5mzhi@cW`k@0waP70_ORmgKc`c&;&W~A z78`t(4IZ|^H`?IhzhO+H&vQ2PFWKOKwZTtRerheR{O&r{4r6WTXW8J_*x)N{@E#ld zhc@_cZSY+-xNgVR@_t}LpQ{|*N^kd`ZnOggDY6H>w3u7 z@qfzf+y=&w{bnQsK6r?mn~*79~rI0e7S248Q3KV*Y{UF9`x_+O=P zYk9wGL*H$KM{MwoHu$49_)|7Gw*z0daoNycsBo%xK0l?oKP9S1KJWW`^Dh%pQ~hVS z4X)!I(sLP(f&6?l$uFyXDXv_W%PF_ob0xS;WTHYM11(XSjTr({s#ngYIzUa z;MuG{=j9#2@P9{MZHJQ>&f{A?KK_c)&t&wx-ghvZmrKVlR4&f{eMZl59mf&wZ0L1-XVpjk-bbpPzh%Qm$E{XAIzA; ztDReH=x?#X|JerrcN_c}8~jBZe7_C8O}dHjlNGqu{`nlkxqaSc_$o%Pe@~0-a3jOD zUy=Ar89sy6V>iPW;+hiixeJHZ=Y9f_tT^wV{{_!A{kH{mD*c@{xSq!of6k|n>B;q> z-@B(|)ra@n8}MA~!}r&5KJ-68Q6fI;acFwFr$mqGTI0tNh(y?b;Lx~wZ6)H>Uo-q{ zTvH-GWJ67V9)U>2hsIWo6Gn-OH&R?B{~?@W$UFtBWfDREn`BaPcQpFVHuPJ7r{n)H zqu0-g|AP$ImxSB#c~leO%=!G5;k;a&Kc}D1==0Sz$^8z)IUmlS^Ld=nb3Pn*DH`I> zanAn{#)tE-VL0cn+ZsLQd>&);oX_11=X~@U8$EU@8q!nGSqLg%_%-U9PQ?trmf`x6 z_$<{oIJ?v}`E$0qrjw3830GU^#8XrJmHw}m1_iQ+;gmo!aGr3^ow0F@dUo~uEe*D&14aJ}xK_32a3wlew-M!%cklt(mJ zoZevg35@luDBF;M9NhSTLbb^jj2)hlI@W0H!m z=rJAocYewkPWotF^*&yrB~0`Bfrb7`Jfoz4hg0*{zlWpu-x7Yhpp<`1>Nt@ISFaq! z{Z59{GyS_R&oP|zTp*~?$0{c#TGCUGqvtc6=>5u`r3_E%pNra;H-%~z8b$apQJ(Ad zRa>yJrU;)J;0GBN;kO-(qJ{14h5(A>XEkYm%mCjjt87GN`646f^91pg;4zTX_t!;j zEu;%+>nXyIJ0uDFI=>YDd;IK2vbOf$DWoyXQHZ5kxY60CG=u*lD2>ps#>b)$zG!T? 
zHrTk_C|X>L&t{g@23tb`cpAQDNvcfzu*cu6GXK{;`_?6b=l`F6)os^4{x3f?|NB+% zF|^fzzBm1UL&>k>8&X!1_D~JJW@=RpjJ^}be=pHigC?nZ({x{VC3V$al~?q=Xr{=B z-aYlZuar!(w@$}jeIh2+9Efk6IKgkAZ^qIy9yk}1VE?6<4fA8nFwjr;q?MAADQ+7Y z0xX6c@HcqUi?APUw98oiLHsyN(b0ab1iuQ^M4xOMMfgA0f<^eBHO9a%@tM6;Ay^-% zqyGd2!$eyPeD!New<6G_ILMkx6`eK@kpF7XQqr*) z`?Ujs^9em(97 zD_{SgJ3K$l`>pw1z(^Ed8^NtnrR4woe7Z{{nv3#uCT@slDF<=gMaDv!rzXKvf2ZQm z?J=JoB3*!kM5*aFK_Y!d57H#1R{Y6mToF&Z{8oDFy$5*-8$uIpp41CmD_?8*X`LWl z{ydeR`154VKL-zm{Vei#cgtMGDl^%SY;CpQYGCQ)S1I`uRDON3%3p;C>E!41$Q-81 zNb=9ZVU@oL7|GZBk#H%}Y9;?08c=7VrKHc>)V0^pJ(CdvP-;Kj6|MxcX^Tx>hQ{^fC*INF58~OK!WtIghC&{NKZIypA z{2nY)>|dqidsTjYvdVWNe>(f;d{5?BrZSTJ0vuNPS8Mr-PM`is$^X#O0f0OyU8&_K zp@z|=%F{}f-`f5TDBwCyjB71lv;DrxpW^?Q z$c*~;+cg`j{nsLYI{P1r${ZV1!N~qaIJA7-|6dOKkSSU1qR3%pZ>HkY0U99r=OqBT-Xhr31O8<2&@~4wu z@O_!VRQX6gRf$#pDOmSSC%?2rX4JnkuGpl=&qMxn@>`YsK9!N=kH=w^|GJj1==5pR z6nUk(aSf-)r{8H1^5YcpqWD&M=IxhEYL)*H^3!_})y!lj{N1hOYkK`a-|O>!8~OJt z`PxsZ{A5y{Pvd`6<9u^#c9o6(2h!+I^KYGB>;LaI z`j;yCE-gr%t@p3n=)XYa*K#x)EnnZiY@@&HHc4SBLbCsK96G<|n>SR}zt#R-NZs{k>4cs(+J>{LMD@*ZFms>TTq2N+Z7_jr@CTDO{sNVc>d%Km=hyZ3td0Dcrp5lLXehvK2M*h6pC55R7N&YMx zI=_~m4S@Q8YyB51`2&@o7^bWL4NCqxCciR`e2POKtRTRr&Ea zDOvAJ0Ho8u_y;nZwiDT}3Wv_G@oNB(PQ{Wu{&$&@KhXT8UFDBwR2M%}`K`FM{A*Nx ztNrd(`R8GL;YsVSv2FmO{2EYaQsbom$DA*p_-1*%L|`~5fEq}vPt diff --git a/UserTools/template/MyToolMultiThread.o b/UserTools/template/MyToolMultiThread.o deleted file mode 100644 index eb6f3e275c35b5a3c3a976c8a8b204f808fc7792..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 50032 zcmdUY3w%`7wf9MeNEFP3iZwnNaMZ*{%phQbuY_chGcv)Hpim#3gfKvANYZ42pdg`1 zkm(TRR$6VvHf?G3-b-8VEk#t+pa|H9QnfAhNn5Op_@J$Fv1bzoSDt5-p}vb zKTgg$|GoBFYp=cb+K+S2u*4s%&d$oRbjq@>w32rkwXF8F$?I})S#I5E-5_XB$32xf z1IL*-&cZPY2VrAyoQ=ba;~X64;y4cnclU7^mCaIRF>RqE`=xmsP;b zbzO(^7u0n<&eL%4WXE#ow%(3@WcLi_+tHzRY)QG@Us-3DZHwgWp-e?%$8O8Dd!8zg zbd`HHl0jxiD?N7E>ye>1#EEp}4NuLUUF;s!OE;E#ly$8g{X{Z}zIT%#NzLzGGSTWf 
z;fYzG7P<9yY`P6)DmIc{c0cKIBJuL=7i^w50oUScYvn}ihNmi~S6o+dZN;=}WewiW zwPSbPOC@6u4bZ9RS$p$8UGCnmf}I`xAK#$woxPufUj1{k?C2i5>;retK~%$zjf@BT zFMJ0@tnOQ(((A!kXG0)Xoon|m@CbQ*gYLe`;G#fVfPvWjT)S_71XerFcD3&b#?B!Y zf&N9_TtV>2ak#bn$u+EC^i5TWv9)78Qs27=h_dKOd-FeBcG+9*Wp53!tSPa}3)8fC z8X1SwA}KxqYX)MslxGy3#<=Ck89ySM$5F2#UOQI4XG(07XCEq4+jr32I|@82x|ft% z?%o|PT)H2>rBZ8SN4IfHu`&E-k4x8q-bmT#H?*S>rk4K?|I*#dsI0bV#wz11#pC+Jt8lwe-a$#NxSSo z$8I}zb3;|EX|5gfr@F_5V5vY^7dy5%cUw&^1rAinu8Pjtq7-8#)7ZR$syNtm9mdP3 zi#WV2g>A|WdGgZ3$w>N8>}O=0M4RzSr?EZ%$k{$3tdiO@QuxtLF!rAVB>aN^6b*^K z7})%YOW2TV@u=v!!j{_Lsg_FCA{c%5$f_{s$ax;R6RcI8;2n4GY2aDWy_h<|-KY*a zL04KQP<7Y!Sj;=Y%?+^|(%tPbuu3Zx4Gkw(+g3A(Tp20J3zI#nXG`uzsAJu5Q^k!H zH&uM8BJ>pEVKDlh>WY5raf-3%$R+k=)XSlC-NAzgL2+=0`|*A8O5DuWAwC#AsLBmZ zFE;XHDt1s78;YB4cj$f=e+aj#`^zp7eb?^4g2ed-1ppCMD&Ut!eRR%1(op`fn=UHR6UI_s&GvsWrg3=npIn;5#pK+(UX5L7$uXxlCn z7V@^%>{I6I@974&+3xoS?QN9{B;(3f z*i>N0?h$TWcFzDPBRBeeyM52t{Vm?yggp^=eqqNRU5R`f7Kv7=am0@OjLKB>2fbN# z>~3Nci2lls?z4+_*qaYv#Ms>NlwI~aj6_i}lAz}hliI#_A~OQ9dHFUjYV257fo^Gd zg~#sqmjq+sd>ZS|6jrd6AJq2kj)3%yy}wfJki@(8Bv@?gAsLkHo-GB;t>99<0^ySB&Q?WJ$;L9jsX#4(d{v)m(U_DBU{7X)KXh^xI`5B0=ouXj7~ z$2niz?+Iq@iX4yXxO=yarnt1mTZfE5&&O`}9p{`TuCm;HO`n6~Ztz>n6`z>oL*xY@ z5B6{JHsCyI>kto|tk+wPo7z}wE}6WbHZ~hmp${T1lnI#G?IVz{ki>_T2&PL_H{&ZzkKI zps3_!i@XJi>E|X{^s-ID6%sI{1^d@{Ef^)x-&#_!v8@vv`qz8O^1E zMu2EWqMje_m^r%&@`vCC4S5_+VQS+8pdE?>*ic}w^;+*B&eGOpPrG|x`MhwGQqj^6 zA}bh;c=I2{Mfcv}h@-W!c?Hzx#@5EVFmqe%3H1DPsQZo&++enC@I!>o+P?knzH{;| zt8Z|@Z-UWxP$C~Cx}FWjz{xI?5+m2xv6~9fUp;ouN5{FBMX-pmVc)t(NIzw>t}zvx z=r&OFN?A@xLyWXSwr5}?*i-uK2cfqpJmvQ$*1OC|27H_^N9E4Ahne6D(xxLMAL!PzOU#Bb^MZ|p3?t3014VcO~x!c|QxX^#CiWQ$AR(t{e7U=l|`d^GI z=s(1p7A)3^U}48r30I{m?yyPEN<(IL=(qqm%NSWX7T&_vO1FLm+(03z7! 
z$)din0~I0{g`v?Cs=WE`q86`5)X9!PRSbg%X*MecU>b^MBgm{nz{KP;A1ko}_lRCE z&3UPCIY}a5y8tA2%nbIBPd^BZ68Ul+q}tI%-Whgu(-3ik{?<*IAf%u}$#yN4p*R@3 z*<;5l3w`_H$O!stMTjDS4Z*B~SSl)YD`ArxN`#W`Hq?SB=F&pIj^5~PN9Bd6KtBR2 znTF)j&0M-6JCHpFgl57Nw!275au@Y_+kw-$Tb)%`4Y6$cS8=b)pJpOX+7y_cfPOFw__WQv0>J2uVhfvX5c z@I4YjMK*$$@0q{5Z(r{n00bWygK=h!5bS%N)W8gs!a5063d8fHTkOkn7p)hpRP`2l zLH-wYa}-fgHfW%K{#;cy=q-Vf_Ce-sciU=d5;VK0d4EMg&!2P15Ov76`zsQS;x1Y( zn;VhXlL)!WvC#!=ZdN5pGIxF*##Wqfe=mI*$N5uilWBI5{9E;?_%-G38e zI2z?9PjUo!`xNQsChM08E$z(cg*@pcPCVBfzf# zIYxlDj(!AKD)i?gz#S)1O=tu-5m*0{IB@*!hi0+ z82)FN!#`>v1_&Mg=N?P=ms9i4B>Y#QAEk!>6A({i_}_uve%SCYqCUm|gu$bV{h(LP z|E~ppn*UO;Q_Ro>lfi!$g=QW3t5YL?HCjqWS9j5xV~G3>6!bCb)8qoZA5#K~{43HU zKV}|jL0=3N(N%)@*dza~!zc#ch&dGo<>j!!6D9bAEo(79qM6vs^ykl_AMO;>#+4wT zk#0Z`is{ye{(VtL3nw*ZuEMyzjTDZ01QxF;5qq^xj z2-0rc))LX1LjaTwy6<=m7dB?G%aKE-D#do={y@=oDJKTsBRt|Q4MzV+`PwBi4gVO{ zbz@>&UnTU#0G^m)r4Ht+j;%?jQIpb8Em}x4sct1XX3M6WCYD;FT?1;A7xRfDZqgo@ zFbfWpt1Mi$-MwrCj3rhr&Q{p!p9m{mYa>ARFE01{2Gt%g%^~t(SwSC&Z5V(x1D9=h z5GQwUC+*Efi|wARxq+z2qPZZ=CiDzkHG+)3Xcsx_hGA(^jTJyQ_~5|^W+61kmy>io z3&&JBIRTGN7L7o>Q>=mcL~=4Ndj_B31YFmm$Nb)o2EBDk7n*<9WBzT&YLTVq-_yJZ zz#rPt5q9)BMK~G#ty4_Fb z;MzDrFDB$Gg!>#OBFpKka22X5q|oe^sz@_qN_KxW<`(P4%o)?HL4rvQj2UuPj2SOU zrm4E9S?VUS+-j`Cv?>?dih6b&h%KYu7>IpyK(MRwwgbACu8YLelt9^w?w*hG=xK^~ z7ljw>C?@xFphvmXL+A8=(02@Fyex<*Iai2`MqItTh6rxp>78vm-Ws_1CskFpa6X~x)DKs||EqRx(uCx&!g zp0sC0bQuL^JBE!6ZF01rN3&Ao?CTQoe37F{)6H_U+ewmIkc4Oe<&FW5&u5 z-JZHH`HJqgw0x-pOFgiW2HhkLGVL+q9jjf+FzOM%LsU~@9TB89HXer z%A}!i19_q+zDIROj_Ai&fF=8^?4uMysS}fO9um2ZZcs>fcw;9H#9GVke(3^w%j-7m zpUDm%eN8`CK%6h6T`+zB2wDYUM7hQ`tQ(Z+uC+l{o_NHgwyQC4+iJBIIfIwg{Px}{ z`$48aBSJ+YJfK#ks2wG$X$jSog^J$)#!Mh()X&0*BS!rLVgNCY`f7I*yadn75TD$= zBf-s98=mgoA7KqjJ&D|$gy-Y@cXylI&UKGiBX_CjaqBmT5?e)rRdkgIZ7I=iC>Uj$ zPKarfoEu{3SgWQJm26N*I+EU?v5#tR0N#6}oFSs;(&SL?3z3!K_BYagfa)VEw*j>j zJrJ37$*z|yR?hpK-jCRUQo<)_>3O~6E;b*_ zNj2g|wIy#$%sa$GfI{43a;Q8$Q!8!~ot|yGqphj_+LiQ1&eAP=AXfC!)aQT|+p1~s zwc-!IOF@in3;s%Tw;dCN|r$ANgU 
zK*_Sw!6-t|o$vXCMWh*orAvg-byrBSw8+wy-ic9uWn!OAu67dqB$JeuBj*oB-_p~a z#Qqfc5p{fzyms01vaf z%~$xrC-6S8sU5#iOe=N$ItAVe(e2r{aEpQ_#b&{fo|gvQ|Lfq3xG z3dm8w18^om@iF2c^^KBv4SKME{!UPIXMDn5JqNb2vE2L9-Eu8Zhlg6itTdg!A0QsT z#lMa-&Brk+ua^FYX2^%F-6K}Z5r@J$1*j5gE<_&@>k(D~Zi|2MfbuIj=N9q?5Jbs6 zQ{-I?2PAntB2#u#@(l_l%qQwdNXEdZ_f69RW6_^T+^5O066*lnA1J=QF05wb$)ZwC zYKd16QscFNV}4Kk=V&#}fGm-psNoT_=zyn-Hyz3rc}w)@M{9#ug);F$Ap&nglzo)U z#d{Ajca120Q(-Xn<-%auUPLF`UHt;KsUstaX#i2jBPgTW`Ubm32aERl4%$mmZF&-0 zORSfEgof#Muec01(bqS>H!40sNF<&!!Sm15O%>0RB+sr?mVag}Ki!0T<7ca;qskyl0LfcDKcpysX1aWRzt6xsdt5I^t-|}odkk*r6_BP#TgSwSq1sSKW9yu7sA+Dv>E?-^P^i^Yc+1@8 zrn#QxPR}{Fb`^Q9yaKG6J1@Gjv-#F=VUZ)7m?6z<>}+ldbw)axTjvO_O>M2X>uQR4 zTDw|WJgsdJ&-})g=2?yTWF%@cY_1+P{h3ookR zX1}}lEO9dhH_y0xH-jz^TZn~75EHBG5Yd9zE-3(19_X*A3&g%CUK%?=Lp4}M%t@sq@#*!e)45X4C4{-do_M9 zH|{9|I?|ep>i#Pf7YcI2^=<7OR5=!nz@ja;CV(G&r@i(JU4!m zgKON5kr!uXdbyYLJUxx)=k4ej-uU|%9Jr!ysqT`QC;AmvZ;hCYnrrseunO@<@3VZ85JGucuPKR4GxPK=fl>C1ZWa zEk%U_2uOi`IqoK_6hK@Nb()@5Cu7HsoFzRi*{LI^qC-=ix=BS{FZgHIn20xw#0xdpO=}R56uZx$o{@Z^M(vkH zUr5<416%ZH8gAgnG>6)O(y}d}W{FbsaPMG7U!^63*kEC=;UpT3w$CwaFlEvod&R#( zk3PVKrv0fqR|zfLy?1Ht;(hu`OA$(^c#*n!TAbt86MyvpH(o1*ky`XBUsP}BtCxoYMe<=Sy|?J@I|xQ``lFjBAqAwFc-5geHTilWcJGgq^x|O$J)acr zBdVW#F)Wu_56fb{T`XTEBjY4@iVxYUa9yY8$BSSYXvCSfS>c^Y^=84r^|=NYjttXV72w)*qoi!)^Eq3SytHBKFJu zBzBdb#L8HA@V?Pmr@Wm7ir;7Fyqj&svvWSmwm!)D5ZRIT4)ZE$`?uZ~} z@KiB9fL9nEp@v0rCqB;Xc^gl5=U|0$DLoh|+9$tR+2QV`cki)^QP2H82}D1z-Toc; zEGBD*9sQg8>zji`f3vY7>H9Xsh-dN-9rm_(;RZjB5#~utY^-p)@P0(wa4%kJ#H$*$crR#60h`$A_jaQ4J5wrjlAiqU z-HOFPuc)Tns?zgLaE<)O-kmi6dK9}4l$Kx{#g2{?`>hr}D}}smcGgpI>P$rUu%N=V zUAEnJ*KD@?&v=?R@7>0CDugWhld?W6rS%~;&#>2xk51jazg2se1#*$7*MmzGj4(-S zM_NSEGO>ZSmgbe;*DE-^uA8V7NG#PN z<7pN5A>tKVU%GoM!3_)A_zA~jhpA(Un*F+aC!&C~LUO)~cgTR!B6fw~ zuf_vf$X!kR)$&R&48vdOi& z_qydezBaZ5ZhkNlL|qE$rJAz8>upzICH=e*+qkC&i~eqx?OlAby%F|bf&*{nJazkH z(VciB`=Q$C$KZu8b`Ex}50*h}q=uZ&!E=Pz8rI**f$3?%=wI!kKiEs@6QCtaPQnGL zx9E)M_P)WO+Xf>FNC=Ls-t+=_?%p?b-RZ3Buj_sb=I&S&+0k)cv5E5vXzbWoL43)! 
zcO>Es9)jmTj_ymLGje?(y0tcTJ9=lUr?%`9s*1b%4O&NjPCPBb_lBCyAi0tg++v2VjdIfO!F4Zvpj>YNPwgd-r$EB4(EfX0M^U2BSk{APi!; z4+Td1o)@+KThJYNQr34L7I04{KSixAdM~h)Oul5vEL@NRi<+vU2lhU#_G9{QL0GH8 zURPOrLByBn#yt1TzYc!RY8ZbaF2b3sP71ycBi`+wZgPtrjZ3cvsLhT zE`@{irai%;U8>+nS=?(qJiFRz9Y*lD-Ms3j>fu-ob^u*(D&>uIV7WnpT z4|IvXhCAP2pyzgcs_XVY6>wLrB#rP)|=}S6V&WQO|bN z^Xb~Mzd+pYR6Q?G)bsOd+9I23xN54=XsR=%AO92Xo|L9GT*^^(yXUq7E8zCOisNaj z`W%F6Xe$#wOTLJ%5@TD8oB+4)-emWmOqYTcU5GTrcgeK2wwCIS#(Cje+B$Cb`RJ3C z##y1pjyauvO_e-5>Bmv&40O80*s-+>iKMoxCDP1T>-@u=eV3+{KU|Ujukug7;gYg> zT;rO`%88!Bnkm;7d3?U&@x>*cu_a^2m0ap8^At`E&+^!f5tVV#*doiqcQPVn-8$9E zT9BW0`tYH-E74u^f#N$yk+8`x5@99kntpSJ61|Cs>5E7c=&zlZzbv~sFBg|oz5xfN zsq_bVJ=xilPRPrx9#)Z;OJ%zWN5a?97)+>aP?V*-)r29H)1NO;*;bKfRrJujs`46K_qg&(!QH_MMaSWwwJ9YrhYsZBugI>*D_G|8=XrXD zWUo0PuK;y~;Hy!8bt!+>XSpi(Ybq?tRVB-<%PXi7#iYI0E85G6R@k&HVY3}7Pjo5K z77IikwcXn)PxNTq>-NpMwJs=Hf9QS0@pSD2)Ye`?kZ#8@gf5bG3Wz#|5_PJ;=MIAB z45CBAS5mSJ5iWDBz|Jq((P>z6TAi$FM5ySux;1w zdOJH4FBGM=8?GGv0IrEwKl#)OS4Ey@*$}jLPYygO`}MrS>O2oxPkIp5r%IK3UzJPq zMSsEasUWUR9}X4PJ{-{1wG(4%tZ9Igr85Rn$jF zuSYW4xoTMUTgXW7J2k_yrz4{#ttJ8TgE|}ys%>_it^7dfP(wPvchdUl)p-NQxfY+A zQCJo6^x#;pc+&f*!XHtnXXvuyR^;>#xzlx5Hu?06B18UemW(Oai#~uXW4aFZ9%B79 zIS?{RmxV9&G9;IU-%-erT-IqBP|I>zryFqH6H~KY;+MfvF}iaJOJu@Fr-7&z_Dn?d z6?ydYPdd58uZ`(Dc_vM=87^X?B5@_6L24P7_?5C$jP6|G7t1o?=cj{${z3y@oQ6LE zB-={TQ>@X+WnHWIP&#|e1x^s!Bke=nT#7LKx;j&88u-Ac*o?1WhTb*3= zCV(SROw4BZc`CmJcoy?3YFC|H*5&###9sJ>`-CACmHZ611?6Nv^tptDfH`u3gr3iEDo0vZnJ} zaZPVB$cUAi)va*e4~c@FA`$(GbYkgi3a9a#Cwl%w^k;Dp*Q*3R+JRcuw-mj{k@Fpe zJ4>ulI6`+y`VWOWd+<7iCwrtUwn5?Ak2SMR3O`2!>ind@N2j&eHi4)2>t6|64GN6c zK7p$Nkm17wsTv>{PM0q0S_dL;U6x>$mghoG&6Hf=lqtF7h?SP*vRF`Hxh~7;B{I_` zegc3Is!NxZn+^hTS;rf2e2t$WxhxuO)04{@o(=+WrFpF+b6NNfEkkly?hGjLaeBEV zb6NQ0IYV;ERYO{i%Q`6yBfynd?WAVAEU~an1qHp1+Q}?>lW7ba$8}3L2|PKD>-Etc zz%$kAe&C~7eDf#~g9iF-2L5{mo{R!|WO75X&#`%no&j78d^C%bT5ixl-=gparJq=* z;d+I_yA@up&c6VjslLB4;J-BBr=g3I{)39ISm)vTGT@o`Pch)L40x*nUvI$arT$EE zPch(U8*rZizuJJ`Y{2g@;NLaiPXZsq=5J3cyS@sXM_WCE`9R>waaQB`2uYdvTp;js 
z6<5m=>sZ`X3%oFa>zPUq@J#l6!hpYRz&|nYpE#8COpa21rNJ%0$*%@bj*?yr-($cZ zQur=KPwQYx&jTNwot!7=$onqvO!fMM0Uw5fndnagPI@}!jt4%5&11zH8v6Xyz~>f>i}g0-ej7sz>Dhq#Q4(usoHqf_RId+#k74s_v1SJS5)4I|=$8Q> z!{*x!YAE(#h{{A?2Yd{hhl{l@_`5N5Wum_dxX>T}O5Zoo?=j$4V&ayG|GftM zWdnXDhRjTS78vk{4EQe$`0*HeGx5LJfJY2?p8@}+0sox=j~nnDD49vma}D@+4fy*8 zycAQ4O#HuYz<+AMHyiNh4EXB?d=v)OO!{1Ez^@@3W4|-qXrONfKANouN2!7<1fE5vHaf9GY$bL~3I(pCvfS6C!hYboY=RjsS_OSg z0vf5VmI>TW;05aHJHR~&lwE|bcRqp$j&E;iY;N^i$f}Q&hNjL8Mex3U05_M0LQM-6 z_qeYnt2G0m8=SNN1qQU*Cv7et&&rTzg}4M__XO_z?c#-d7T;54Yg$ zeqU!KP+v;WxR!8dXFy~(3dmoI*Z#vDjh$hC9ZFoHRS31V%?b;O;^xkd#^MrEF~bv$ z%CY|W#Z7H3vkdvZl0a*7q#3XKL&C9CYie0lW2CVb+O#M+HNk5pRRlw!&aRoEKq&1Q zOL0?sJCv!91WQAq=C)>tw~B+6CG^Kfe=eavAMJbkJf@1Jilh|XdyJf64k%&ytyM%INV*GI-TMHaS)(ekCU+CsN9&kDEt zn+~fM8p{eP`(0CAUGJY3npQC>=ntXfwCkMS>3El4xbpYN~f~Yeg zR5!W)l2D67=dMYOV*q`s{T=8~`11?KtvQ|sHNk+V+W zJpfUwR(1hUHb+Fs`pDRlPzYvgyCu}Qurm^#hbY(4*3q1(lFXZl=fzEpoe}bDlo_YX zz=WZ;+3JI++4CZ3r&~#G2CqqYYlb`&N$*$c!Lxyz?my`cQKdZh*g}WyQ`~2ht&VTC z4jFmK6{cN>CQcSfI7-@xq{g!eXy3+`u=5F_gyN23x6p!aP`#5BF{g6N*Oe5?Gp0}OfOp7I_5RDgqkB5 z{-If*4rJ)2et%W_HRxpX!t>^}%@5PLseK`IY`ujd74KLr;nq2kxw2!SgHSy_%~{N0 z!f@IQXHxnM2hd?=IG`}tzqGoUniKPuOoLw8wUK7Db#u7WS2DS|r9}-LQ|qSGhtdX4 z3Rj=W>~7p_xEZ46rJ_BX=BYvJGtmXXwKg;-98uvs)YaPjm9B88y{%I!D8`zEW}#W( zrjGEuaBGC-rAe6Pr6CM6qSt&zRvk-&606DF=GF+W4*E!PA}zjtP4+UHhl}=bRJXbH z@YT&Qz*oJDH**xEe z$z`ai1))AfI}aGeXSdPRDP2}#eUmODnN@F^%M#1Lp)ymoqGeuZb7sXdjF*CYMqF61 zy{jV}n%~?J>1u4zJ9l_YC=xxQC!hkMeqgyHRso{XmTSXIM-({!@1l84Ciul)Sk7|KBqC9 z%N@sXF1K9abh?1mi=NL@BK};Tg$!>+p0>|YhPN^NPKI;+?=#?A6iz1}lx1N(7|x7&k_Sp&*v#Q`G1e$oc~6JJN@uUM$hg4tiq{!V{vGE{*KXe`~R8Y zT>pIx=YE(&iX+j9>q-B8BPBX@( z$qpCb(Dtuo^xO`&Go1IwWen&2@ot85{Ra&AdWLg*KErTcuU!o1{NFL)!`1r$WY4d# z`g#;j{b&Zms|@th4fNfNzJu}UV|XXS?@~C~pWbEH_WvfMk1+gUg_E8Q4F5C3Z(+Dw zJ>RGL_A>k;g%h9o3}3_O?_&6VhW9gkcrI>`h(904M;h=84S1OW?`QZukfH7K=16%NhkQHImkAsWBpMBE?|{c0QvX1;cFQGqI}#SkzCG4{tu)QUyMAh&jv>SLx%r?;hawi-bbKB z{JH=9p5feo_A{LOPY<5|QzAaxe;#8v_n)iqe4Y}~ug0PEuU5Fze`*=Nh|%B4_;COE 
z2BYWxGr;fx#^;ogxIrR)cspKUz}GXJ`_H2aclwX~c2e@^{_}11>Zw!iPZ-YI>uCf2 z62pIlI%s>ItzKaz{+v$*!@2*gV>tJpEez*;MyVG-h(Gs-^BMjC)8`I`hZ%mS!pUw; z3?E?h4>Ei`!?`~D4fsg)VhHKU>)XR{Uf&-woY!j|!g4>0=Oj1O(2xc64i^_ZMgwI@hm|u=(+tH7|!{$D4gtnDGsfFgwb>RFJbhhjDEF&eysuD$oP~o zK2I6wUohY=GyEHj&-;x3M23IN=y|>JM}Y{5>~I+lZHEGdll?jUVutg6=kkCMiRf>{ zq51edxIuE_O&$RYoX3sdGn~hbpN~$`|2r-<|7RG^<-Tmd|7O6?9wYga-`>yoU(E1D z48M}$w=sN!!l}Oh!SGiY{xyc@osAnLC;d2vbNa^_{+}qP?eHSQxg9)S!9I`V|c4^#8?hPX8RkIsHC{bHDn? zfaet@_2=}b8t_Vnb9;W%fWNEoY={|$L;L>+j6TBfv(A_L5dGy0KU?8LfURZt`3Cxn z4fGQX^p_dvuQAY1Gtl2)pl>kH&oR)q80b3<^a~91OAPeA2KtzRex-r_CkFbT8R&N# z=;H?ZeFpju4D^%X8%hOG@CzNdD@$gKbKt*L^p`vEs}y~e1J}<>f)2b@(NA;W;=k{Oj2j*Jw-x;?2Y!*l zTO9a_WGJMF1OJY~7dh~sD7@E!t7Wpa!hw%g^s5~BIG2>W+JV!5A3|x310PiQS_iJ_ z*E{eR75yd$K3vJ&;=o^3_^%xJNODM|7ajPy3V+3cPf+-72dP2Ma=h|qNAbQ84mp4l|EhvzE$BDI`BU#e4GRShSKwL z2mZ05SJ`RLH`VZx76*=&eylW48lTMu{8a<4?MiC%xb&fczChXANndQhCm8T)2E5IH zFE!v%11`UhPmilQt~mAoje*Y}4EXy7{Bz2$o&3)?;A0K=WCQ+X1HQK)6Y}5lYWT-zr%p9HsB8#@K3A0wt@c*3MY5r z_5F&0zQ=&a4EP!Y{+I!O#(;A>@O8S!K!2XXsofXh(CtWjK9r~(`F_R!HvbACHQnD% zFyMN=LwYX5q4n4EN~a%IF+RLr4>SCSjE|l_I{EBo^gKR%WWe?Ol=yHx^*q$cf4U)V zH!}RkOwVS9bA9x@mH7XJ(RVU>&PV>!iPm#dF@iKVtapOwUIdzK-E~-s;E%`b9){z;G#oArc)n1zjo?BngB)5>s!olu1_VyIei_&IsImab343Z z!2id9Z;@(>dPQ-qJKhZ%&HsxA`uhwx{Wl7fNFUCph~Zp+`8#Ck`t$LHKR+75 z?8Eu+_dmZ5*;;?ngAx^^YmL*KkP=~JLyfD~1jIc2zZrfOt|<{8vZ1Cwn?NMuvmS@W z38O^SK3!a;{vn*^#`y})RcFF~uCDp%Q1lx)p}HpeO*k^~e}vKNe4^jLaD90wK96Z4 zoH-voH=}G`FV3IS-^BQ+$L7LrUt&1t!})VQPcVAUhvObaL-pb~=l>|FMK)HV5Yj=H83J+q)h z_;KP&{;}0H;X@fdi{X@~Q?I%pjE)9%KcLPU?^gHZtCTc8DDGfSQXGl!Vd6^una1!F z8NPzyc?{pkaP`WS$bFIFx{WA9{*GW$p8;hIHz|Tdryg-7|9BZrm%9}GMGV(#A1a~8 zBg!5@fJ%)HdhJgO7_MIV5heKh=bzV1aL!lrNh(H%{+%Y9;ioYARSefYN!hy?ek!AH zpn!*z;I9;IW;oSX+mwD^jS|sQeYLKSF`VdoReS7YcuM>%?p!!8(m2y9#>aE=JXc?J zgj*Vm@f8k!a%2{Mw!|u)+1Y6cpjdupnDJ*j@D;f#MpTZEol?Hs5#N^`1vz~HU)<3~ zx{$WMV*I!Xzs1j)q{#k^KBzt7AE8OrHp6#3nG9t3C52R8>@&3^|0*zpv}5BV+bQ7% z_-Tyho5L*&t>QUN_#|yXQ@A}6f~(<6FWr3NyTi}ohXIm)lu-9G)8&8Bd;hQf_&}-~ 
z{y*?T0jUaoMiJp~KNj%+hLWGg?*OoxbVeHSYWlVr|CdQeBf@}+*_pn!Oc@{i zs;mh3i0mkcAtn7g%(Oysh{(iWeX=RlJRIKyagv|$Q_jpB4?&*fnIZ5SDt^S2hx_%b zjM}CZO4OfXqr+I{U$e^0JG1J-2+qoiAKEEC)X&l2x6|g)k7-!N`0v-l#rRKJE`qn= zcci53@Z8XB`hRpV;B>UX13!)QD&}K+Ua5JOs|!7^{37mW;84@8#MxBdR^>;kGnK#c zD9Y=7j5ApItB#_)Uh{DI1UwIVe z*JUXG^`j`S*I8WuRYy^NyOPiShxRLuwEjC(c`l#!1&*}*D;eZ(Jc{zWGsu7VD9Z24 zAfNVAj#U2x8RXM`!;zNP|7Q`mKkdUDY5C!5eZ}oh`xHl7UcaZo%inMm<-Hl?-*Ob? zOEQ#iIEwO@XDC1aD9YOz%J*a_?*T@nd=CY;R)Z>Eej(i@Q-1PNT+y1B+N50ECI2k= zioDK0Q=Tb)(K=rHLxF_F|I9+7)bd@S__pMt#AV8bQY(Rxdc@OI-bwGg_n|Cd$I*hH zB`x2{*IEDRD3huDI;B7H=gC=qDl&!r9P*c|&9o9#nCwSmk<)(5fMt@uPsyL4%IlL; z{!(OQlD|>Ow^c!se-;j>{B^)c{#YD5om(vLF6JPviI$Q+)81yL@-tNVTLpFUkCv~? zzo5!H+i#aD&;NfQ;^M6Tv%oUd-@1#a(GQdZQ2oc?(DJqa(tBB?i&OqeWss+l$CFe3 zA{5Rfzkvozqr~C&D@*6IZ1%Iu|>yuOd_YLyv?v^F=z9z{(7l%{+uMP4` z)CTS4TCh4htLRe5LsU4EG)IW0jcFEl%SruPss*?*lXuYdnvvvJygD9UHD zf8DoaiIu7#)xQLX)BgWvkgxya1pWU2C^l*Re}zH*E+v0J8J6UejhynQLHSJe&o7q} z-6(sqkIGpmQVd5g>D>{AJRVA9l zsmg0RX%2E7SvOlrZ?sRgOCQ~%Wl{kI>X{`CLa zWYT|M2K}inPW`tUGE3s zbc6gEM_B)<2Kj{pQp1%>QL29(4yXJ@O1>lfSSywM<5YQlB3tnkHRyk@DzD|~GS2&6 z0GaH!aX^;Pb|U@jap>|I|2Y8CsYH?|{thVlhg(0<|C8jb6ES&Cl}|8K7f#w2RC%ZU zcBu0D_qsG6t-rp17J#U~2Gn`SfV|kK7*qXet*p!I`jb9R*;I0HGW|vRoIG8R(+nKW q^2Y;nj=fZ3@Fx0;ccxB~XZ_b%f7*M_RDSsnWQpBcpkm-G|9=4KF}vmf diff --git a/UserTools/template/MyToolServiceAdd.o b/UserTools/template/MyToolServiceAdd.o deleted file mode 100644 index 1bfe2c95302e0533f0de0f304248880202598d20..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39104 zcmdUY3w%`NnfD2kfryw1m)3Y?z^MjnLxPBjmv0luz?m}9EJ39z4j~LAH8+z91aE*z zkm(R*Tf4Qb-K8zPZEZ_!x68U(V?n^$-Lksfb+ssMwK9!Wdb3(DeEpJUNMSD8# z&%kjej_r`nm?^T77*9&X?-z%W%G2U(eU)1voe8>##mA#Cegvw)HuJ^N02IVw|J;x)J9q z^!1fEH|guY*5@TSH{;kv<9DyF3``%wg=N{h8rL_+d)KzN)-S!P*-pM`5563eEnVu82)wQ zbUSq$E{-NnQaQt;{nPE_s}9w^K~gcGz)S?%P49xY|B%78JWD6UT#W3(B6)nBvP4s`vX{_6Uxs5AL?v%TqMdmPNM1DDL4 zUs7a`+!xviv4P2pmPJ#Q(bVb^J9&MmCz?DQEjpAqsd4z%fuYObzi4ttXeS6GgKq@_ 
zcZMdYtD?a0GviU5!1uz|mLVm`PCgw?t_lsz-&-7nKZZiv!4ysPmqbT)ghp|#swbN2 zgv~=CRd1>AGJD@pDC^wOvb_$UA})_6k3@?O8v|(5Yk{HB657-eYKsiM4TF9GGjIii z+8u+&M@Jq~2E#6@jV2F8llO;q(Sp>iAqag3CRQjDl@-Y!*vaCMQ`0GjwLZBwTKkd? zlITchMR*9B2bBoN_Gs$6%5GzvO(_VwXwh#IC)vZJfuTvt&IIjz2X8+u%OoWrT z_IZ8#V0RxqLKVjH)yaE8dswoOtJ+W*G-qwRcO@!fFD}uHhLl|}+5)x@r(DC5yR)VZ zJ=;HFU$~8J*B<%gcARo`Y57gvy$K_?7(|du3*J@k#C^ltLGqpVS=J*phBlIX%tjEQ zG*r~-;>P(~$*c-4`0AC4-~ofmuVL1H6VhWt|}z3LSDt+DLLlldzatyK)fRMh5{cCk3v{ zam_?0AvS`%rSSbcqh}#N>tH2(#T$SsGr5=eG9~`O5=E{$=4kRIg{A&+J3w>CR+#N^ zpqRDE6it1JmlVH^=ajad?fS!ro+yAr$Y>;xh5^yq(ZHRDXh~|T>Pl=3HPug6zZRLb zUn#cHUrXX~S2}gVtr?{PZ*s*DD2z3%-RyAT&TdE78 z#=W7?*N|y^t%Cb^f!mjcnm7iUzG7rPqx_PY2N2uSLbm8S!b2}4Fed0R(5X`dM<_&I z<#ko}JDfOx0Xv~gk5H(&Hrm9xrp&rbcciME(S^Dt#r~{?D%`FQ)gU136aw0bz5ASq zM^y#w)b*hXGL0%tjLiFrd0nuLI2OyMVsvIBaMK=+iF!n@UtWJheQdWHuN#M72n^qZ zJPd2;&>KgO2BvR>BJ>WU>GF4u9_8>jT#tMXFM7a9-;<>?n9a_*`bl(yP8c9Md+g*s zMwD_F3PZWkmZzjgQ;E=k83*j-qij3qMt-A5usa#p$;=4G*$l?AqW)XReB7f~ zVCZ_dN12pff-5sGx-4X)|71Klc~8EDylounhu={pIg~!6jNXVlv^d~DbMUdGm>(XS zUu={ZGl1V}&!B7>&)fnfe@S_1M2RitgI?F@j7;UG!{kXmpZ@jRM~?y@+m4^Kebgl= z`53DLk$Ix$-I_o5tiT+}IX6_9P1$yiRy;uwW;1c6^EvZ$THOTYa&#xl9jSvam!vl6 z@po`f$!<1^?uVh6a15l&YEah8z6XXcl)Z~&7C)0a!Frz>y7b5 zEOE&ttsUKc@rqeiZ)@=K%Y#*xm1ym`WcKXyv6!i>oPBe=x4ZME56!Kts=Ro1dsk~m z|El4BYyzs!0dJ22c*V%NTHCV+bI$WAC=EGFDeiJV!3oyr+!P&b(#R&=S^ zW@~!Y?Bs4ti5q)?d~DL$SydrPoXT=u*Z=s~1Crw@N6L@bBigB@40bbhxTje46c{0P z>8jF|#Qs+I#@U`R`MfUPt$H|RqsMe$X{g5Om$f*c=$Dte`{jEw{qlQSk?u!;AD?f> zPDV|^$I3*^{v1>FS!AWxu|Z`W8W`FMKvAc^j4R7JdbG%2=YODx7sZn?IN@AU?ys=s zIoHLN{uMIM-a5|z>}2baasL0DY~5Gv|6ah_S^Rj>w*%JC$NP6pwsxHCADwKyJ;}du zvh~AL{KJ#2Uz8Hx7U1=L0pdpjY7j(hw-jCTO&@N4>GwZ1#roC+|K2IqPbc`_nqqx@ zqW>pl*7qj*ADvg3i20AcNINTRc@SSnMC1X8ohz|HEn4&-|eLqu>AVG;7OvTz+M| z|FLP-YZLrmpJsi&#Q)i8);lNpzc|hM_{6Edm};d?_K!`q?mOB4ooQBTlK;)A))SNb zz(3C7{5;@KO|$Np?0kITlHq~FFm#)oN=wr;siUy1Gz|gRoyK|kNsw=mX#UV8rKTV;yQD8%; zM4>x5d?P+fqN#!MMsjnokEF2v=Hm7@Fd&+WBj<&{(8r;=G1U>m9kOmgR%5CkH(2(a zt!}Qs&EtWgM?n`!tw*!M1m*D6h}qpfKcLMx 
z<924PGyH7-i&%m{UKsMEnLr^=nVB&1bW3&Q9ZcUVQKdNJ9t`uSZqGb56;b z0h#QGSoR;8#w;B-v=3+``P1}=aL!C$>CrJ}q&7hEA_$J<$lU<3i&W81*G1P8ESUPf z2!`ax)R3G3`^QvdVU<;9rYj+p! z`dbshuKtdWU{`k{xVELEeU(wH&tkmT^m3c(_>~#ln`u&=AzaLDjssw@N9;Pa$aE}4 zODxaq^t91%thKjOuv!`K$6l%4&BS781s6$b<+_cM#HIoFhMIIha~mGLx03H{0C6ja ziCZjos!?wMU8OdFYG^N3ax~Q00ICAQ4ImC!cB&o=-K~0y<(^_y0_c6(5op=Ik02l0 zHBrpOl9-8U8^RbQ?o-U?`_tr*i6Ws|1jk0tk?MfKoY$WyREx3Q0+PP_gK3`po2(lYTYm?8b{hBsIjKXL3YOsx?4I0v0VE2bcVT;`FVDRrGrq=)n zYm(qG2ez$dV)KdnSez<4`&cN7vyX)f?qhLFj(b?Bg6uYy+2+w(&9#REL)$p};wmRF z)TvnS32mnqRK+%&e;4T9p_Uwh$ve~zo0`a7Ps*g;)W8<>C^=ARW^J-}0&LLPnf00W zj@qKcr05~1s#V-Mkxrc=G#PJ%R1W|P2VC1w=y!V95KUH%Vr#K;t=2+OXD#HI!6svPosgC-SF5IZ8P%Ar-tX)>CY|1j8$$Ows+Kv% zLQT$Nnl139*~W1m(|iLGafPQ1=itgbrcp-nG0hIuc{opMQk*U)oke0awStr7GHz-W zz0LSPh(jN+Kfen$tqj;8qbrY{3{UEL&y*Q*FWY~Cmgi2*4>UH01&{BgNOx$!4 z^DFfsr_M4@DOj_N9Jw25Z`#9dT}(ODc#h!G&vsBDh!-VUCKe_6y|K&k@J9(fPFn9*NM1-TZImFdM1RK zeYJa>S{QyZakf49r=rA%ZN0&0r*xL_GFl(wYj5$XFvF>x^i7!Wao>^UWPfT(nf}w8 z$oQU9t}U2^|n2FwS>@+*Md+ZXU6WcI>;Wt#F4~o^1u~KZK zWP4P-Xh00tamyGjRyMdlqp`|XswXt6Ge{mZ{Y9qHraz*J_1GBJqmQMdywP4niTW?X zM3widHfx^`41Ev;Dil;W!2{_x;SS7;#+)Yw>9=vK2*xl~+C7HGxjQ`orgQ{9aF0r2 zzBBl8c{H^SeHdpr$+;o5X#FXu?9|!OR9DH^G>l`B)WWjwqaNA!C$5bo_co?(uZX0& zf{nFrHzwa|3@m(+)9NXRF2so$-S`w7Jvrw_+oswN!i)_#j@q>cZvV>g z=)e~tyc}~3P=C2Gd8BUWwfGsx!2WGC5cvV7 zhFDG?!G!k*+4V%@tXCtOsAR*2Rk)x5x3)GUUmtr+uRllDm2hEG60PkiPgI-mv{REp zFadp7g`H{(qMXrGI|xr>8BtOcGu;BI+dveqer;^f%*Vskqv<|m08HPAbNw~;$fA-+ z^1tL0ba>jX-5VIXk_{rY5?y6ig`NCeG&L@ox&oejxiR?)RQz7mz-M4{H2H7QT zoaQhFwoZqA5WVG8oY9|EKS>wo^qjCPtMT@`#{OyjXF+HupRsF~mnROO{&kSy;umVHQz!3{ZQy)dG&55R3E22rPU~i#!q4+Hjys|~K1o;A3{=%wP8`33aL;Pf3(qLk2XQ>cqOUzW{2iK-keM<0jOeW2*_$xBF_;d*F5J+%TPG%;W&r;X zH;=5qPbB%W>Yh2D9~rt>0Oh&9^f!om2CJ!8b95 zT15{GzKJQ+d|Y7)HGWID3a!~rE{C1!H5JYo_72u3cX`Hn>d zT`roEvp~#$(E=|>!2P!7o36$QkBG&2Vd@}ejMR26Dvc!jezC$4yBmG)F_r7 zFY#T8GEiv?%3*<6_SUIFp>1Txe7#ygIB%!5Vnd7TJ4_sdnRQ}b@ukhU*}n9$P)36;=&O!z9iJ+bRUIP$YG8P_n)#Rx 
zL6Hd{)c+_8!C*uN)Vm_msdc$jHkvq3SDMC(w}q=;z>106NA8y!#Cm5Z7+6o~b zaZ$b3@Og!^FJseAUw{!-y?4Oo%!;XgQmJ9_d8<}D4S1A}d#AD!p=Fr+ITeMDxQ)vM zQl@{emRt=89!loog07YCGkCopA>L=u4>S>FJ?U*+j!dt^v=$ru#ZZAhH2qJ1BDfKf z(IE3xU}H?afq^F!$!9VPuatIoDjZCA3psq4k$EDFF&?|Qs(k4OK!knZifHQQirf)v zDz~sBskK2xnB0efYesa|o7G1%!_0E>I>St0>tfs_pMUh#>FI@pz1C59V4eO9>Wg8l zE9g1i3VL)X=?aeISX7jttm%KHCrnHuSoJ!~2_;aQ2-*)iWo#x^k3UeO$zNoc(1JO` zVA9<*V;`;Dv>c_3ma)g-c(k48cWJTyY`ZT(~&i+uPk6OBBdA z#MSc}*HeV^&UCzf)hd(o|LJ+rf(4fZD;8aG)vRE3b>-a3>foHJITu#XsjdlDEQzlQ z+ARs4@u3T6S=KaN%%Yo@SVaS6MQ2Q!P_hl(LmBWhbsi>VDiN#F*HiVGgATeWm4f`W zOUpKoYc4IprQt8SxhbloPxve;&M-QdA0mQu9*$yOG?bP_Czh7fpKMukc}ooGK^!VS zT3WVsTzzTzW?y}2aIkn$Y2CQB6H9{)rRDXdW%Csy<=#prr1#@ce5G}R#hZOw(a~Er zj@NN8?Zg0W%`Yvxd))le^1FQVOM@fD^GhqX`s+)pHjl3_tr?uqP`bkRDPL&~kowXJ zP=FE~&;?dyBmBkm4gN$j_JwKRxUCb7Wgk8nW;;eY-zD6d$A?mE_BnjpO3UF2rG!5I zs;0e+I^g5MaotKk^Y-XG73RwS11gX8bx7yYRwjP?aIeNL#kKMcN~3+cs5E$O#$eU~ z>wCYZZ!`4SGDcJxBRMRp-rv$ZS^vWs+W$?Eb0#YS=`aj4HoQw6EGpgZ_x+cDVxd}C z2s2fk{z%JQLEZ0Fr%mG?g|vp!s)pP;MWGox<%|oYHI%7jMGC}6gA*T3;~p$%S;NWW zeua#Md4aR&+?lkdc>JX1```%Pw?kc2+fswLuF`z*GG{N%Gv%Ljq@tkXSkTAiPK>MNr{crYIX;LDmsYuny9X?nDUUgm%BOnUTzUsr?-b@`_^f$(m_uLV zf#bn?f#kDPubY#@SqbfAf6B!8H_3-@+T5uL`Qrr6`d~@`FB0)7#Gt%&ipKrpv*x=Y zb?>tlc;JO6Kt5}s2fgiqM?CPw9(bb%&M(^Xlh5MUZTZP(@oTsI2I2Z8vktMl+d z<=7c{_!fodx5p7p@9qgcuJMpdk~5~xu-q;6?-YH0d~*Ho%pP(DeNE#Na$)P68pm*u zlfI*I-Nk0|c4>TE25~MPR=E1f$@zB$>CRl_j49kL9rG6@*vt$C11^2m(p*5_`4H)a zk`FDTQ1U@iq2zN$n!GHZ<iGo^}qujIA?zO$!AT@2Z8vkDIU0)<>zMmGOGqRt`=-L;AN=VDVg5f%G3m3 zYnUmXI1D|1K`;!W-<J7rseMh{&_JTnV##{iatAU;Tnpm2whAe`%eZwQ_OP={dC~0#{&+# z)uzw29&)Y*KEuk6o5ug09&+vkK2ywpxXxnwo`?KDc;F@IA`6vkkq6%Cfj{7Z@Akka z!`?!A>pk#~dElS(z<=w3&qEhqNblty_@D>A-2*=ae5Pe;&$85774>p6hLA#fH+kUS z20qiu&R5ks6>@qootU1`qm_2fp0{-{pZHAdWG~oep}?{|fjFu}(QvGx|}e8DgDc${56jxt2N>Dtgmf zdUw3%=GgTf;j-1zfR;K2+kE@IbSk-ukLv#`AjFI)G;!)q&B zyE|4{plVLk#Fng#C3@rW2yQNp#aagjs;jH#$XB-`t<8z*>ejXv>{9o(v?ux^t>NYt z{Di~JiMc&3?Y)u3%@@V+5BJqovF3P3yfsnXmxwgi5SrT&@9T@G>=p%uYw#|3ytf6W 
zG=bwnV?wN}dsSRfRJQl^wp3Qp#QcIRm?KJ%pIu!Q>1t23J4HQ_LM^FnXi2m*!j=y0 zs728$=hsJLvA+J5u}I9l8DH6oZx3VFHYcJru~>U|JG5Js(FIlfspiidb)_z=ookhM z5#bvC)bi(I{>)>M#9R9lh@3q0 zP#0EA`{&Aq3!B4BV@vDjN5e5tF1v*Uey0tdFg&vhb9JUm>UmEfzz(CDz)~ z(GlzKiVyT4RO4t1S9Psx?^+dG67TEpj2CveXh4b}_aRU9oQC?pzADiUrS0*)>I;{| zJGX!D7!q#bL!smsbJ{4?A4cgoJt*F8AxwQK@ zqRXjR*m6R_$v|0pHF}1u#&}DoM?CAOn%B{dI*pqGosix?k6AnHqLtmldsTl&yk-qPcZSV3s?fgnisNSs&m}v9LAD2Wjw)~v?Ook@ zxt{f{_!uL(QGHQ!qPsV~z+qc@b$*-_4o1!3mZlzrR@a6HdhkbM7Ib&5ZeP>is~n`f zkQ*;GvNL1zZRzyx&vX>jw0HN#z!%rWS-Mn4;BiZ25Y{?oKyV@LTJL)h&E=S&#<~I# zT^-8}VeLDF|GZcXp~Qm6+Pm2N3tF&*L#Shy7txw6w;7hzJTZf7`9&JMi*ap9XQ$N}?`-W^f8wIhL$|N#3NKmQJeLN+wY9NWqOG@k9oqRC zdS)ZO&Zyj6IhVj~GGQZ|aZ7db<0)htuARAZpk1vz75R?Om9qC0g4w(i>N8 zN#Dg$dYxNniSUZ%W8?%LGSiw^&Uq|@HT$KQQl+}hpQ z)6$D2nbSkLjLV(gVvK51(~erSi9~z=f4L>>OtaPBaZ&Zz-O(3rSKvy_tP6NCS7db+ z-fL=Be-$!*LgoJG$^tr^`OBXj4m#?vXuvEp5szh-Fi^QPk~0CrV6IjeGFn$-eVxOD z4k0rt&gF8|Oqkrb)8EHd^>xSE(BeDd#u7Lk%S>o*b_b_^+e{hes>Ey-RtZ&VvEQz+#E@J#lpItGhWTfowUoR8TJC90=)aFUT8_D?KHw;N)>>q!6q;*wfz| zkF9O*P4u^PsLc)YZ+#HIPT;o+e3ihr2wc*CS>xP7nl0!b5O|fq z`6(3>>8o)Vd-x#)6YYuPF!)mhkRRYQw1*R&(S!=@hO6#uMqT~5qN{ZrJQCD{O<&Qmyq*-z@?nm1TN_( zvOq{YNqT+_&BW8)IE?)@0{^VQ`Mih8P2VQ)`;c$sbZeaTafar#G=YlEPd za<&S5y^!+>AxFl;zj)wJ2{}^!GlE{m&ntqyQ|Jxj`4AKBmvORE<1FtW4pZKz1U@YA z*95*<;OFS)GL$3f7Yba`?-uxWA?LpZF3bA|f!`_UFP(@RB)8rb0+)JI0+)8~6u6{+ zT;q&Sz8h=&^KXKFi@<;4LH~>g{m(q;f9XL#1<$vb-1eNRar!~pvsY0&^dvF%&(-4v zZNCvNPZmsD;FE2+mex3{esK#fzaPtP!#V-7LZO31^a6`Yyg|p2tUFpKN zE7Y>Cap4>Tm|`ycKOERv_H?`OH#B|1g*R(_y$c`GayGee^>rS2HoNd|Y5Kcd zxGAq5)>Qkw2K=TT&GREx+cJTB(VOQ)Zu&F9%S2qZJKfwBegpE1-m^)JMEV#GgP%hX ziTDZ}2G`-7C*Q=Kn?49$CbvB%9^LeudoU6IJPxB*eSM+vguokc&2&DFkKi!$iwPnT z{{jw!n|h}FFAChmJ8|i^UfeSgPvS6gOnnkxA@HrZW^&8fuF!n^yB_$H9=Lf`jpdU4 z|1=$U)H{g7*lF5<+s<VcQ~3hNDd;1_t{d_S*{{VP52^&a>p5By#aT*Im#SOWu0 zyFS-3C$C-9Xq@igm}uy)@StDefwy_!LmoKS2u$vF>Sh0V4tXB^B^r0z-{e8R(gXJz zkG$;voQIsRdEl~KH-g6aIp{%e%FF&-u2W2X(Qk5{@_$->y@#Dr?@gdF<-+S#D$#y9 
z?>bH6w7(CBq32!@6LC2oyI7$Pz5>^VewxN9pK}9)8+y0jIG13e{9AAs`fUV}NH6yX zKBZ6xm*f5^qF$P4BkzGamR8LcZJ|FyEC>j*OqPL_a3=&eAwf{OZ7z zw_4C|6!_(WexAS^1Ww-?IjcP6Bs_4rZzlDA%7gyz1-?n>{f@?YVp}lw{EMKM@}JPS z+i!=2oLV904<2&(UNsZz_YNFJ@6QD;$LqAl-Ss|&0+8JJ`5Na*%BdFgvL9F^=w(0f zB|$IamhVq9@id6T*#9NH??ik^;Qu6WY0u*x_(2c+_VEykMES!wjNTmr-z@Ma1TN`+ zBXCK7-UKisQNF}i3H(mv8NFtIiPbz;;D00Nw+Q@Rjl1bT=RyAyfy?zvTHw;3zY+Kf zq4y1e%k{zydSS`fP&sAMn6eLM{{Kr*Ig5eup5E8=orrF|N@J zJ=cs(Zu-wUa2`J1L(VLLOM9H}tn%f*BIq|km$7FuD;CLZ5C0N~!9RuzL(ewI#R8s|xln>Pr0Id1;k1HVA@ zV;_es<4>>W0)H*&W&4uP3nYG%p#OxBKcI1Uc}@T9#`g$0QvMS{j;zP$1-%?E`2V0W zQNFCNi0E$zk!Sp{Q{ck_|B1ji3*0CAF-d=}z$N`^flK-!flK;-5V-WiKYHNL30$_v zX}aHI^N{1hX`(;eBFYu?p#OjeeS-&m#Dm`S=d2f5k10Vf>+uDRyX*0FK`-lZ%1IE6 z#AYD@-R7x)T+cL`j|9~QVA zH}4ks-wHXO61bGFy9c$O&ue4nBwRBQ`w|X=PbG*%|9=^W!Otd$#JKtj4uhXV5Q%cW zio@WPI zAaH4?#3lW=1%0``re28$J#Z=i3z~)}Dc{UJh)em`63{8A&(wRpzUE2F-zjh@->ey# zE#)s2^g(@1`6~rpA@DYVR|&jb;O3HYTFed3L4D15<6#o}G$vwxb>;ln`kMH7fp-gh zxxj}6uDeo&a8ANRImTDK-@zXw;yjGM7?VuIOq=5UPx*sH{A6|I{Jf~I4ZUu*)>{HE z74$Y8gCuqGextxA6VT~?flo0Y&gO64P>yNi%zi=8Pa~ky>jIZwK^IW~lCj_DcD|o? 
zaONFQRCw3LMSqsY`Mop~PxR0Gd4m)eIQ{tngJh#L?SKAdjW5d8U;@J8?_ws zJ_+e_`p?R~^__{9l~yI*_IA!~=BhW|(Nf7b?5xUF_$tY&T-n!W;ZqllR^nwp`Hq%) z#rj>c`H@0ycEPV{D%Z5)eP#8XP7DTh#Hpt+Wp6j_qUF_<_{2%x;@hUIq5WMQ?Kj3d z)?0b9B^A{Czy3{{C=;yBe|xP!6%-)KuL$wV=CQvKbH$=>A^TStuQ{?N~X0lg&N26ox3U%JhaC!0oC>&}zo zCUQDj+LPa(c*rYKmHcl-1cAMkeaODC%m z|JhW$5`S{?L#XlA8@0dUZL!t-E1ejWdb<$?f0;Ch$)}k^Nr`p(#>~l<;hxV3^>iU~ z_To2lbu&*T|MkbgzY4TWW-dhjOK}{l{*9nzlJ;MQ`(x$5M{#H8)YMO(A1nX%0{oXB z2mk#A_}3o?|E>c3A3F~IJq7r=HaXVvoB22AluY#BT{w=F|EFX^GHW36b6tL{{4bCh zN$Tf%`B?dnDDGVStdnEqe_iuS{a-i^{$f23m*waFz_IF|r1_=(oyWm%-v5#OYmS3I zR6zeV$H8A!fPdX_@LyKIe>WWme?tNNT)(^h9|T5Hx#q{M#lKI;^l`cP#(kMWvp-w| zQf6HiQdjq04?@Z*ZRzMm%kdAD!+@r?FL6!m2*=42iMW=^3zR)^p~CDFb17d=GU#i z2|0!I->dc8nv?$LSmf5909HtUmDYcr<~Jv|e(nzy(toIcey+pa`uS{y`p3Eaw@mAI zkL}bY(?hr+{~QV8x?S^Ex^U7mne!9++RcBjhyRWJhW~lZ?`}W4H2*t>kV=tU#k8IQ zru?UJvp*Fv=i2PUu>Q`$Vf35!^9nGkaLeW2?O`(Sl{$BhE_2PZAI#);$A6P{ym?+{ zc--|*=0f%VdW*xcLN`p>e=ZKU{rr2Ch4fo`)9f2Yu|B)?f73(%vbe)As`;qD0*71w z(;oVF9;5!JJoJ~Zb`(D?^q*fq|Jxq=SLh8r&iQ0=m!JELh5Ua+>tCUnS$@-35-Qht z7=Z&3dG)_t>kn%>b8_pi1Aig?O&n~I2Aq@X|JWAY{vYwse?;py^jb__|GULQ|9-9C zl#}JJDxm-0JoHNKVwcNqu=niYkqh8->CV`-w8A_+~r>d{zCO%*XD5a_?fGklKfiA*qaW4!)^CHED%|BiFvDRqe@D-`%OC&w-#Qi2 z|GL&csu`)D>rc1-DezAr{dFOS(fqwWEhex0r+~kZ{#{)T$6?`r_9brpYmI)RK%dL> z!1-R|Ki^|e`d$3nIk7+*a89cI|1Vs-^(Vn!3LMp#lWMenLvJ$8y*Uqg=r8YgILb96 z{a=T}@EQD*y8Q0>N1fKMn}L>+U;htlejnPgOv^OCyS*6whIW^+-^E|0?Ps6GMEe)u zF#N{;7d`B6)B5!^fFe{f{N~<|i3{V;ZU2DgH+l@co1Y7ZLh-jk+iz<|+P@Ho;Wu(W zp!K``zfJ3ZulYCMPbp;oSC7&D1`qqcTEPBD0sDJB^p9%&HM;!fWXf;u`S)52`R|C< zzd{qze~WP#e&fHt_t5{k)?cCd&B^ea`_Fjj$De;wX@w@F{ze>z-{{}tp+7j=VXV>o z=4ANI{UaXwcWV9HG$Hk0iNo+4{fEI%%pJeWw0_2hOz!*H6AH(V;Wzq>4EO!{;4f5v zb?Y1sV<+ujg2V6|{5q}Qv~#0pg|=Ux46I_EH>MnXztvo4h;VVYn^9-FY1eChSNCIW z)BGD*IFXF~21=On8$h3zneyw7o#nq8hv7Hnr#)`n6j3&pzxe-nbO*|JHkRUW^Yi(? 
kd+Y#YjW+Ptvi?K!J9qiM;xeZYf6vVh$H!1UncV*S-!%&;E&u=k diff --git a/UserTools/template/MyToolThread.o b/UserTools/template/MyToolThread.o deleted file mode 100644 index 49c4764e0feff1f8f26bcf2acbaeb4d0d1b69a91..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37352 zcmdUY3t&{$o%c;9kqDYetdFQb1{^h5O*6XvSG+udqQx7J!~Yg8Um+e+=ru3FNrSf;Upts+)2-|v4OGiUDHg8}`% z{r2dcd+zW2U+0|vdHv^3SRV{8$jrzvWXdo;Z6u#0Y8XxHlKFC$EjO+<7Bkv2@q8Ap zvvEzvMf_86O~qA+%ZF>ONvT<79C4;Nt<;<`xWi*f&y$d}+A5c!$7Uo7%v z;(iJ4mx}yl;(j^qpBDMghj-9Geuj zcEyPb^iKZ8rgdaLm6_IoWsJxnf2gX!+*OswNvd)J!;o4kwC=1bFu2AEkn1f(WkpFv z-C}d7otaZ>TJM=d>x*F4i>B2+0%=U)&Z>h3*F4nMO9t$k-@~=c?~w9Yfl%T3{IZH` zp9xvpMOAovR4j%nrlRll0@Lohha}lgZl>E%%%5XgmHr&A#{L#79QbRj$h3nfK5guS zu`$%!w99kMvOV5`uYyZ92D^vtZ!t-`m|U{18th9$c59xAOqFSO7lf??LjEm2b0}C6 zwxfAw*{;}>6iL+suf{;SZ}exPodg%~?)?c7yZT5SC9`jP0qc9Jb;Yu0WdD!tiLV+P z8`~_OE0(M@y=_ldT+JE+oe3}KKzsb%-0*r&CVas3UNU`NPKGzXYQLz`#bNt~9CN7O zzm4=z`Sb{@97t1Bbl*U&|%nA^!*pLVZWP-rEW$v0R3C;L#j-yZ6x( zHVGj%gb?jFK|60}RuLMv-@k_>+PAPSC<;@PLCr8gwEj|aSHJ&RiS{F`HevfAR+FT) zN+X`1X6@G#jW`WC(TGkfO?#c+2UD4yDR!k;#e2;hn$0Y?c9D%GjC^3&JFp>#S7(?E zzI2_xNY(~b+$yjffuWyD&7u4Jn}LUiT1y^p@1lme&)*9GT}AlnXy@{k))xO3`g$5?D;wY>&*toMcbK7bwf zgHYH(gJ9t2OaR$&1RPYW?q4tyVxOwEyTRB^j8K0cTrW-icer}%6)1eN9v{>+^~}1E zwIhsv=mlsFmHWt7<4G;?6Mdy-jJhXx-YJx|hFWZGeWUfB9u*m{Fi#Ow>!WRSkZEP{q9N57aUv&jV* zq^YyX3+OC#9W1X$PB)uSB~YE1-$!tPoLwK-&#i-m*c#NDP9_Sewz z#OA*Ad;$OZNd7kFp%`)L~>Cnix{g7XOO z8*ZY!PmoaMXcjJE&ulK?t}6JD)gZ^?MbiDqu>c^%R-i z6wZjpPJ~-|`_IdvK5Oy!@HxC2J;9yfQsl^;nyBaVypI}~#7V8Nf_%utV_-L2;R56D z-dpSZHQ}L${G>VX0@;Jr)WmQilY|KcC}U2r2TlNb*0OM5I1S6vq(4k%e#k6o4s|z$ zE#8U3o=VLAz0icdxfb3AL98jz{1tusveTOWKG{1>R8&v(g3i3e%*-5;JiVT`Y(J}) zFp~9qogV}19U6rg+vEgWxWl2zFiz?)P9T`CCsaC^Mfp@0l^k4|;;B+6*_CLRUTT=K zy~4yG&CU%df3gHPC!$$n2g`=7hc^R$vfRCl?Z52FSd8OX+)Vq&L~Pop;ghh{ZP~D$c0J0@sp7hdh_Zji{>?k^n((olc-1>& zV{Ba#bN3&Z6k0uR#ae{z+`B)7o?f%;S?^E>AwR?lwI0_PL3b1bir3)-n>xN9HLbsx zR#HkS@jy!aQ^B@e{Mh7qU?MyBuk)8C+Zp>gpWz>9k_Y 
zh>b^l9~?G0pM-3jTFch?O?eQo4q&g#dN_U_S_OL#oZW6J3YWbbw*Fyy7yJ_2!`KvJ z3)!oo6YCIn8d{MChOJ%Q(@pEyz*u<3Ve@i6gY1Sz^Kvu>CXmOFw{|^spfLVDLWc+z zllpk_*fq(K7GL?kLjYVaFTMorLr$0zzc-QFLX^PF_@C*-4ki8Zmm9W2H|J87`FdVP| zPA!NUW&Dbtly7*Be5h5yyP7ygId$R zr5sI#{oOJqRx}rl9XhL(?eg}Y1u`y1ZbiV{e;P4cuf!+AKsFDBt>>k(?l0l|3u_m^ z+UpdtbSehQ4E1sQG1Yw+(2(_V{36^F`!j0M zQ7W+4Lvj@aM^fbCAU;&Zs(zM`$R(RF^|X++mo0T+D*<+ok{9Nfc2Bu!FQxNHP14LT z%*5#_DRXj~%ri{uTFe#8FjuHjnhFHN&pJ1%2|DHf9#orUo@`s3t&J?ZbSkz41GTPt(o=P?;b z%aiu|;ug}$SORzyqDftWQEKT`89A8*Y;*Gsy~_Id{nVrGid z$GEhQ_R456HJ7*lPB1dv@d25WO((pNpOQyy3CI5gLb=++Z^R;zB5|M|#c}#D#>O(T zinHt4Js&QFXLB$!`=xy2$;|BE<{S5AWk2dQwq@?2c+mnLNo6dK>p&AzM97|zXp7^S({-!C*i$OEgDpZ(XV##hL=2lKPv zo@yM-&;EI#ar-F%hEC6Zu+Z3XdiE<*jr-34@O(k`ZwigCoJkt?owXS77ba)lUzo9D zN6z7|R>Fnj0fsvCihURhL{6sg5+Zx8bR~G3IOQZfWtgwa0v`8(NxIFlrn#llz?w zuQ#!c3-n`yg7YFf17Qb*UDJK++YO31vS17&p_QZ{5qWGPk3`gBD{+emq=`9=p2HS` zvJ?dFB@a=VE^*2t*hGl}f~H-~BXIU&7N?{braY|iIAsZtLik7w9N}bv`i_7K0ZW?#mHcQ!6~OpKz%m0& zO3V@%7Gxf?G!dF3IV^TrLoDTdl+$E@=oB*|8M`DcXgC(Tq*;J;rI3h_rifkUq2FbN zsd0VmnZ+(Ug!Q6pDAtF%7J-1GfueJ07Gna4m;|tC4MwsvZwW~QlZ;|AiH|cD`IXCF zp#@uz@X$pN0!QKB#@J2e;h~^bK0i*~8nzG-Mlfx= zkh&Q2Wj8!bOz(2~#a9RTR(A?xNqqUkEg8LAe7_-PMsBUOm&Hk&Br!ZQEgVV*R@R*8 zuwmFj6@LtOO62_8a5t@o*d1;?OgP)!Pp7~Qm}v}F({?|Os)Wmiy|;}ZW7>oM9FzoJ z2pmEegl0S|>L$NAe1d(qWR~ss_HU-s1Fb3E{#M4i#lMxbzy~(t{&#Qt1I#3E{sU~V zu!G38M2YA-w{MGom^c*McTRu}qPXmUJm_$bO9pLOS2Mp8p~qCegS#{({0tWMq{^W) zL-!vdQz@rBTPk$Q7z<(9j9h zITWheOrjA^CC0EVn$0d>%8kW0nqg~yIAesqUczx}6WTItJ;2&B)b~EVUV0c3k;9iX z=OBj|T#(}N%wX$9A|(?LiT9xbv<*>Uh=y>PBV5 zcAD0eEi>DX!nq#e>V&s>^B>|)CaOde8Mo{}{63r(#5fCC!|?+4*biH21ib1UScl z9Ooatf>R6xRbI4a>lTo{+jJ1}hhNq&b%p23PBY!3;W4q^O!NfAEQ3yAtm zHT(XN5xc^a-~7oU*~<8C_#SZY%sK;R@ZO2vi1P}?m8qUI$Yd?!|9B4tkMeqL!X{d% z5wb@AQy-Jr8?T^^l64^dI?P0DW%NwbdS>(zsnaYw;2n4uhG3(wobIKSsxKsFRB;G^ z50{G(s}8u2Fj&=5$|oH%sh&)84%+zVShaSHBA&7%9u}KH)-PoTx@Y5{MKz@Up}lJV zCqckQ0&^#{K>R&42NsV}KE=k5;E@rGZm0Ia;Cv?D1EzQgACSV5vft@@9UE~BFB7{H zL}n0i`3(jNjq3u;PB4lFI8E^D1&l?W!w3lho+048M40DAba=JBibr<7gad 
z7CRS;HT)R345-L3OCy&f#~aRs*CD7fdb>Qk;*{tec#bj3Fo%qPc>8}O6(yEsigi3n z)QEMQjiybD%lxa1z{t4FCBlHj(K9wXgdowo=rAQd7i7@u723gOT_O0kth z$U-t}!xpbP<0~nlnGa#liQkdV4j*;)tJI!|h{afRryyR+*b{I!1x-qo6#$(lQ_6TK z2-|D0rqhn!n&oFf$d^HA+Ec@JTh8dI*w=;Z1$jS#`_f)0c1_6IS#946?`-o`mmR6L z-mms9*hgWKQ!v|N-jusNyjW3)m6v_DNP+UQXK(q0`8axGJuZB;xaZby;O&>MLwEu9 z+@SuqYU^Nm|Do;`B<@6)u$9GMGYH#Rei$$<@WQ}w*lYeR>>V-|AF7^lKzy^eeuel}Z(Sq4(Hq?%;)z3RFsx}iTh>t!3&`o(v`_NG1dMF#TAO^R zXV`8A;h9uN)D+c>H^B7GAPNQ!jrPg-N-!`S??M5<_zk#MEHQ_wazfVcl5eb0WElaM zo!76*K=C8`9uPYosqxl3Mt`vOUJ#nrf0<>=3SxWE|4CI?f9)nTXAd=J zb=kglRw^&{3uViEv5~ZTcA%aesOOIAve%%F-Z7$j zUY4lm>8#-niWWafbk#I;)fDc>AHdy{+C(6+991{_ZZ0rF-ry^^c2LzUF^BGEZr;Ys=G+`)i9SXcl1jgcL&hm|uORv`qc`#RfGFFB z^Q7V=m3wr6r>e`r%N6p+~^s^e$%MI)q{)D{v09m=xtt=yEWVM zyX=W>t-gSCQXkS8ky7?}`mn?EdZwE+@R#CxM96xF1j4o<@`piBMXqmS7J9ueyDGOl z^ZALnMGJC$=tkcEp-Ie+gMzo7+WP4As?4p)djVS3c1B~0F% z$%RBA^cZa6P!OK=7-ytm1bEmClV0R8&P)dx{n=?ab$=%G){7E0?})sI{TfC*h#S>4 zQ^DC>l%Ow6B~IYeH266h{9FyrS~{)FW5}5zS@ct6GkMQ2kjA)79s}`xm*g=%>4H)_ z)31EelgFT6{G=yOVs%I@^B6Nz0rBK9Xhu#=`|wEiOS{PrW%3v-6f7UvokBpF3NBl! z33!I$CyE%q`?rmjTkT>=-=UIPCL!`VjyqYOKn!sDFj*Njvw2#!LJag_t%kIL6Rx3{BF|ysrAw}-!Qj0z8XD6u`J$Vdl1YDBGaJDih@)#3cs7Vg3GU>@;1mfYl(6lm-@rg8y08e5Uoh;6Wi=3jYYqG^J z5_p~#Re43FQ?>e8U;M3VJV7Uq@{}G0to6swJ;!WVxZoQ&JuFxl8C{8ny^Q}*m z`+>XBe@5Wj1Rd?~DJ>>BnaO!rj?5c@yVdIsqBoNBxoqcaHS}N8;M;(colbjp0-vtz z6X?u9>1~aiSr`azc2;TdjT-zR4gRJE&%{FErng3e_iFIn8vJDq9zc7z>Fw6w>ooY& z8hoz?FT~V5UD>Cxa~JyMcfi@W784IULjkAXqq*4`z*Ih6+1Igi6X>4t!)*WjmM32~ECpuw-v;NREa?`iN8utd4( zJzs-w)!_fo;3uJ-+~l97!7tF@^ECK1go{4mJ;Fa%Y3O6Xrz!hqc_O--;mQ58wE1z3 zoDoJZXIUe00(_6*$$hnyb2>89jO4!hRAFo}!^;z<7l_;f;F32{h)g@9pO=83j-@X! 
ze0~C#a_$1|OQ1?7a?O=4G0Do3;OgR*_Qr-5yc^ZfWq_(SRvKBfJQC}SMnia*8;Lab z^aKKdnd&Q6p~l)+Akf&<&R{ zjU62@p*9vSjYOK;o1xh#4$q%UA9LsW$UOdm6u(MO3zSI!QuPOq3mmHwHn z_RsR}mHIA{%&E;dH`o(x?2bj<)Xm{x>|l*E4DJYZE-Xm0a1aY(ZK6jo2CcpE18MzV?|1K;gSMVqWD5qdd0+- z2Wg&MDK(^u5yFqJuX%l7!*Q`wtJhtP0h#o!XhW;UyNDQS=CJuBDl2nbdOsSo?2Yt} zqPc=q$z?9oc*K0yPGN#7mlStKR~APit9p7O9nsFN_BKp4v9*!ae40vi!P&L3_Rgpf zS9f*l=rbaV+txIrUuNg_E@88-uh-Lwn6A3`*XrdPI9xOiqD zlIn`0?cm09B9V4p@kn!9SF|(cYQZ8}fw+-^I)sU;1Pj-kNX^39iz6-QAJ8NgN(gK0 zjJ8JGVzhWAVOqRc<57Pq17VzNDHhJ^NN2R8rJ*qzY;CPs*x1SiB|0TBU8Q;jceRUa zqp{;!^-#^@Gq{XiyoOt06zo>?;uI5#3!9@#eXOTf@+OkL_*3{X) zCeqTr3h#SIJ3HGu(JT%X$K>b9sV32HA9>&NA?sw?o@ieBo(Rx~?1?}@^-i5fN*6TK z9E9zT+e%rsIM$2_syW&fC|TItBEFptE~>e*Hj=)S{}1g1XU>8bt%}CP2#s{NHQ(4B zjdZkk1sB!MjWDljOX9RwHU3?5%+nF>hlnOO9x5d<2)y8Hf_q*(O3z6m+ z_QF^$*v^;LI<2Qa&b=-3BiAXU>Nuy6MPd$dYzO(o6ezg@OJz+%CuZQ|nxM$ug~vXh z&TVXO?P%!45ra(`ZEI*cMeCO(P8)nth%{g!?u;(Ssi>_nx?D4-*Oo*U)v}4wy^?`i z_G=XS>^SaPDwRwwboh;nW(&s8U~}PbU2EGKgEftqg=y(&?!@3}cIoEijG)Se-uA1aTyEu}r$kM?Aq9bbf7!ERU{gZi_TF#2T9f(ivqdiFk^l z$3-bS0?S>`EK)C<1`}cm&X%rjEIuM{Ec?;IYNNjCK6tm>Yc3KG1btR4&={k}{weCCNx{wA>wPhyk>S}g3Cv{;W z3qwwbbxlWiXEd_9xii+?(2|H~Ux3iXRk-NoG)hqs%0L*~s8jGS3!La16#RP%zC^+I z3Y_GSOez0efs>q-3cf(ZBZ;2gkdySC3a;97lY-YM^#7*dH!JuP3a-*`*WfQ}@VExA z7O^t2zemyAg6|Q6uTpST zPNN2&gE&4VDqfFE+Vg!4{%Zx_sL(efE>DT%Y``Vu+^^ubDfnrKr&A*OO}HfeWPy|a z+@Rny75rKS|EwaXMZuRU^sNdWQSdee-=oNBSMc{W_$LvUr{uKrN)5hV;8foZT(Z6! 
z75dLB_(_P@QzH2)ezm|!j)hCgX;5(0{%!?V{lU`UcPaSoihTNeeUzN_-L1jniX2t% zL4{uJ#}g)U`BblExMaPS37qt*{dJQWu^fxNFI!=F}!OuJi%t)kHrC*}Ki}5`N zC88g~CG89%9F`gb+-A86>a#VZ?5dnO8;`bD*8x`zII4Skb_zEwjX z)6lQg(1&0jC9<;ymu$~A0+F2fsbarD^(B{-^cxhtgrAFxlI6e`bJ+Nz1E;xz(w8;( z*Bp43u;bqy_%eZi*MVOw@EEO{R01`1DE@`-#hT#g8q*VT)vMI zci>fm{!It|TYa8vGO*h%WLk(BSkZ z_1x$eYVhkd_&N>Vufe~p!GEa1_h|4JHTVGyo~t?6_%-;28hoAxU#`LFeKNOp)}6EN z(a;N+jq4>4D981ATzr~e$@i(KIcV!D>95q#*K6=54c@Q8zpBA?_Mfkzmv3e`?WeVg zl5-p_=WrS>*Apkb&i?x~a{gU|tMyXPJ3bA)tS{}i)pLaGFS1iTNBrNGU!k#cq6U}i z71>F?A^l3O^UiiI)5xjT;BsH!l&|x{PK_M7o;&5pb<%0)T|$ntU%sWtSNmm>n1_kJ z4VP>O`spYolCPfc$bdiGr`t$dSL7B7acj-=?9zTZ4a9!D*~X`@bh}x&;*cM+&{F_c?((+igUV zLwidp|80#NdT*N&)eHZ0MS}iM3SOh&aegbjx=5fr6{{?AG9~Xz*K=c&2(ze?Y<2bNX`%uF}(c%#^4a zD*gFNyivthD7YGb9uzorvwD92qC&5pm+#Tg->;$nje@K5Rb0W<_Iy*p)phM%1y|?8 z>vE_S(&wc$0;l#-`}aEvuIk;X!S`tJZ!@jD{clEI`pM4~T$TSj4Sq<2)8D73ME0w4 z@|5{d#dYVbPiyF})!=e|cIsWPq2HpxAJO2y(%|xUd``W(bJi!6c}Z=Dvo$#VVLnPu z`867x{-F^!`gRR|rv_K&bJfmoYv_NW!GEj4|Ej^4bMsk!)p(u!K91T!#Xq6+OE1c# zKYvam=Uxqdf=12^1y}X{iw1u~!PWJSem6zQX}_u9x1wCyPjr;1nDP=AVQhwdQNiUe zxM)54Ck3C1JSCFzC0tU@IRqk+>@Bz?P8cPs#+fXa`a$^Jxbg%nrWr;r_rNO0(dh3{ z2}Pdh@5ALL|0@c;EGPPZR&bd;njCpXBU)9?mnAdqYQ0qXD*a~_`T~(By^9oFm7~g6 z<;XKHv8i%Y+$Us`d=*#aOW!6^Reqh|rJE{Wo|On!<@^f)BB^q|AVJ(!ISUosC-P+H zIt4FM@TCe~qTq7OD#gsNRw;l_}1Fsj)-&Anfh6KH(;1fljZWmI+Ad&1zEXP0SPXSUQT#j*ijw$%b1c=0c_nWk{ zSK!}K=vPQ$+~v89J<|hZQ_m!83>%iEczM>s+AVaxNmeX$nsCvR_&h z{8SZw1t)u?ADk*CTq2|TN?B7B zoY^nzDOK>4d9t``ZELJyxlxSQg!#Qm<~pM-4aIo12S4jrfnS9f#ml?847>p@&|-Q; zFZCt5ly@rGySm3fPH(ZNlK&6AkU8$3=A>Gi`m&E(?NeTGr=KFF)i?3$qw&A|=A!7h zUZQi_)7{q6d_%Nltx>$H5pN#%G)6mO5qJpR`YP^hCx>uxfTO*hPaYO1#*czN0(YsS z#T@>3H1!V7kJGpQpS}K^>Tn-t6^`Zg-Vb?S`Tq|iKaO{hrKctwH!<~f#TxMbUWf20 z`fUdOkXmO021~-Z)89`_nH__ImklJ$sbIu-N`G@PjY$r@bot^XN8bJ)CO1Kxl;`#a zq`NS8N<~u2OsA=7>U`-=^kKh|b2TrGDN$GIKS#q{?OqpG%A?kb{R*u3Xg`s{FDhH< zH#0^t{yKlO7=Ih?BKT3`4WjDNrpQYA^L&_aJKNz~A4}STcuu~Hq;i4Dy-Y2hFUBRd z35mOozfSN^7I)&GbqxHndaC}}$H3n&^vb;_>A(IM_&2-oQ+#Q>^}o-BpU%VM<(K<# 
z)&32~!2gKQFZZ)le>%^OSN{`&U)4|N#_{q$EBICY_Z$QN9vA&|-W;#~mtFMJd3(J4 zue#``^XPc_54z~5bNYDs54-56bL@Eev&6n$?Z2yzfnUZqRsJQ%z(3hVf5S2G%fI8O z>R)*b{Igv2uW;e_0V7iN%#25)T<{-pOpbX}sHC%zr#>lHFv^-DXi_c?#WeotoF~Ua z0TCgchl^B+V;n&&UH%D~OT7!T65xf%kxU)Gliv9}2fT!xAkLG>vQ#eg2P7cw&iYe1 ztG|PP6MgxERKjlt)t}Cl&iY4zx#{l}`pG^uIrZ10&`tkiLcb}PNk8?KQ~wrVZu*CX ze)aDMk|?MCJAt|BH?AXU!>AV>L;9!Sa_XN4H04Rvzz|<2F@`o z7)gI2E~oxRjeehS(1!)TygBQCokqWJIp=s$Fp~ahxSaa?HTw4m{g(-Td2{Ng??c?` ze^}_R7mTF;99&NQKhx;X6G4!m;FmY2{#_dVOB*>yk6E+e<Z@H^+PZGvC^ohvEB*?->!zpMYy#Dvt5&F|EH za;_yrxTkSYqkk)PHj?~%PW&l-{Elk$H_?~#NLx`(iS!ralJ%GRzlZsS+9Iv~MF5fN zWkd1DwE64X;CHKkP8(-11s~}z!R6Gy6wGe=^F+tVzaJ&Ur0K5(znlIVp?_E~l78|r zr~WDEAJXsizctiZNRH|!o+Ta-{Lb~iSMbxAQIpeuE(AZbUnN35ubl(x_^0!W)BZ~E zQ+yWPkdnSB^h{{Ebs{!K!^^b=~oxws^s#A`(TGlic?J==tSS~Jw-tbecI z_c-|X2!1EM)GuY;EbVvjZx!~F&ru@#FTo}GrTz5x?x-)F_KyhtVi^SmPfmMY)YyM` zoc8}pWB(o(`!9zHPWx%%ce6i_UIIaq=U@JmK7X7m_?_)vB=}`JNq(tcKKsD$HvZU; z+dyOrLTdkVT#{ePy+UJuxzO(w{PHIG<@5C#`)kH&e^g_?*}*la_Rj|-^-KHj)aY*# z`hC(+ahLoOzFnh#vy1*9m?Xc{zg?q$lh9u(6>0Q8snP$U&|fe3sQ;>PIrY=~OXPn} z{}~=<{a@GUFTIf~Hu;T>-;j&`vk`xC(?2rK`kxMdH~;Mw`Zp{3>3dyg{mV7_3uv)b z==oEUUp`-|(Vy4JDP%uW`&Z*~>TePHOQb+?_X+)qgoQ->8|DBAA4- zE>WsfZM7CFS}j(wT187S$kS?-+E!7kqP4;{D%1yJtLFbb_py6+b_NpL-@m{A=YKz$ z%*^+md(OG%p8K3Tvx`asQ&Lk>T!soM#yLiczG3I`J@K1~;&P&~#OM;V*|?`PIXF(n z@f{pva1eG1j$9n4;>g26SU!%iIL4{_({V1qQHbLVg`J7>SvbzdaSo2};+Tko(ih>F zgkv&}VjP5(;FzMWOK~>U^|?4tRoCa^T&}J!zo#)|PtFG(R`Es1EP}lY9+<0UC+mPfx2FZ^VRD58g;%7=j(CkFym=5UxgX|m)Y?_x*2g(gc)fnpBuiY z!0hPBG{b|;V6kljSl)zcO*!+XSaEYUUT=GMt%V8iU#m?aZW-o2It z%neWWnuTwNGR=-n>6grk5|QV46rQ}_Xm%e)(9`dN*6f}@*Jd}By&*)*&Gj7LNq5E! 
zGyIj58r~z>%#BBSjz_A}EOTpVCefPV()9eEx#7|RrQ(*-EC>{N+PHkKlrM8!G<}BI z-6V7tw&-4zjnq5LaMMn369TuC?v=`z-5u@VKG(CS2l*vqL(=4CEki^KF`i)^cZf)r zxwYc~qC>c3R?(#!{ozf*1ap@cUGy}$%6jD6D7vs{1{9Hnv9;D*{6SW#GC)_4XDOxL zH~FfRwxi9(n<$1@70oKTtf+D$G9&F1=&twW1yso(RTrf)Ba>(5zYNrj0dL-zU^>c6rSUW02He5!=1k6xJQo6j{P3b_fJSe zWXJv#PuFW`R6I}2K#@EyNX&4^m$`I>ubq-drb__}0*FmV?zg0+LY>crCc?ClYkisd z`;=8ZIi0?}prW&ZteaaqeLKl!#7mTDr{F@x-Rawnn3z}A!mG_pGg7kK3_qk43HY+i z@Uk>goH$V_Q7~QC?Utrc^#E}X$S6U#hF!1K*Bw3JC5@nEB3X})#`QLR-O)3VNGZFF zbbCA+wXM^)68BUPJLnwVO2y#iS_L9=d<)F(WxoA_V5m*%!&PJx>@*JEfFJnlBJu<=YER z`OoFQJZVds*9dg4lDbA77owH4bkGFCyM)Gp@cX(gXMw3T`GIJfFtWbI1KS=Hslki1e(n=)Ct|eK3z~ivLAABsayX{+D&V=jl3>99h?HS))(YMGrL2+ax=hWar{$|3Y_XJWr-X?G@hAJvybc<@5ju@S-#$-W_Y7apw8k(A)6gy zv^|S1%71x1DriDyv3TnmlDqhWbi@eF;`Qf6#Gq17N3*=IV?AzCLm81IIHDG9V(KIz}QaDem)5JNwgSxI! z&53GU*S^qTGcw3@HN%BZ0aLisb+Sr6OqBqykgpwQ>NaYrMwwyi`dTsAFgjN6L{y6M{HLiubhP#=W$pJoo$HXasQp?Wnfpx?5zHO(CE1%b z>s_~kidv;A>Qg+OL(o18ouetZlI^i3_oIB*3r!*u^UO2J10d$qulxEy(YM{Res6R% zZes0i0PXEW-N(l*)UgBUDAS+wZu9%-y8+RVvq~C`hMc0gPxA@8({OWWLW{}9neE^i z8)v>jc@pEyLvTgoOkJQ$(J7db5rSbY4J)Z(7VW(pOv&~|_0Jg1?3){2=Gy^x5yQ@o zo=h5K(%>@QPe?NWrZFgnlNc+MXVS=VJ9!y`87ac>a+$B4t}(X2kZ?tsxB<~pfE3Kg zLJ&>&c>^hlGbhkc8$f}Wt`t}D`T>2d#t-ed*Fy`CQoB!qlC71OG#;L4QJtcD;?ZK5 zxtx;0GTZ1ZEt4$^B~lO9hJV*06*(^jEfeM;-a0WQ5M4KcROV)QcOYda4KdL!jD_83 zd{!Wbm^)DIp}wHKnxE8MS+kVbJtP*a^2Lsh zoiHVu&vnv0Y-)&2P2diU*+^#c5Ib8BvB?Wn@fM>-V|w6KW4lgYJL0Gxq-mtQ7HtGo zw55ompxNxlwP3@V$SBI?@r^78>YBwUUKId! 
z(2N@~YKH*w`>N=dWDlC2kPdpb7X5|LSQdyF*IxlLG3d68_b=P8a5aFjJqfjJV-#pZ zAhUv*fX-vIkfsUfpEINH!03tHpPmpnoo%KPdVV2tCH}>i$cip1y3il~S}innd;qup z{vBkp!tbxd$x0!ks!?fVc{qIF9b`S3?>S_!}( zSt7WEe`n@AYi{_b%M8C_7XIC{X|n9d}oX1LcZ{LFL1S5zK_8$yHq;Uz0c zSmb^(1!_U)Vn6!HOj;W?%}5(&-?E}&Eb2A8OY+RftW2|TYv?#x+kpHYBLStOV4nb0P>wu6NeL*J9h`b^`QArof+M>x5ghX0;fP?A{`i7#Pt6!s~5zW$0p-v#YpEQ)KfO=_kPfW?3BtQHt>6j&b2OS*cKD;qd z*r&QHe|KYc=Nq2R4iIB1=A%S`$ST#mN_u2K48?@P(~}hT4ATn#6$TZG9$di$h2rXv zaeJUbY>I#GBkgX^2{gu~x`VJ$pX+kS5N zao-3Sf(C@k!3G6J#d}q-Wj0FcNL&V~5XrEZA{oF&;!=0v$wusaqby%m|(mbE4 zo`V~j*RAk9E@jY(f>uL`onG4#q?rS0q{O!lRQwR|BKB!Go+-dTY2WZ>;gHZGvSxux zq5km0l<8+sSdwggItiC*dtV}LK>{b8E=$K5m#4$idmC+4H+ z1Ljy|#}O+_GyRb#R)QOR5Y?qN?tr+%8bS%|OA!m=zH5fN+DZJGPdtE=r}H5=v>DDd zJGP|z!;%-RD+%N4N&c~*#F#ktFpJh1=7v}Js*pAixgygutW&H&nUM;t8&I(br&}i) zIf`5$gh*GRvZP|035-I#Tvy0)MVwflMVuecv&4dfur%hwUOWlRhUde(n3G^o9Z2cJ zgI+-f5-e|^wI;K)=8;11Ie1w21rtTzPLECCfYRT{vu!EeOm6nnicG7EQT`p(?E$nyL?V zP|b7(@0p};l>$X%cncbEiblMgzX>sXAy=y9+b1fMy= z=UGyRZYEx)vr;7Ds0 zIVY92*NjXMiE*9Ht})1s;RR}bi3uIx=``EhQxsmZQ^=C4A@kHFpdH7Wq1o}QSV7xH z2m-m*TktAhySzDL^y_#^Cx;cDoMXhu21=oJ$fv{5Dqe47RiOg6Ru}m|m};7H#gU$j z>S?Ju!ZxT5UaGkZP?MyOk`Gl;MUs_9R1i&ynsW)-e$;PI(=lQN63qy;&X?kmZTK$P zM0e9_0+<;vvmZCLbR%pIFD)nA%jGy!g1bbm_}^L?Fod4y@1BARp62rxZt`@}&JyVd zc6G-Ox5)OR26=T2dYT$B)i#SvZxn_kLX!w5n&Gzv5}O%eo57iBzEBoVsk!yuNb^$>g|0_J3RgKNmiZaS8dmbag6?DvA&s zGexr^W9eGdNJPRN^w=bx?7(9a8coo=rw6&!<8qq+Y$Hp^$ytrRrf^>=)Q z3F=#r2<~Y<%&bHq5jP^HlA^L+PUkR9{<3k8VWucT`@&PRy4W0$sCp=U;>-z~gGhn3 z$e7LZFX|6wSl9DLk{@>8B^o5mPW5~>I^!R)^*GVb3>I14AAKFGBC&k{A9}iuru3Ax zs23|)VjV43II2)$g~TVNbuL*j(sZ;7<%L+mRN~5^VDwx~8qTZ71Tp(~I&X#I!jjRj z$ihT10HF4kdQcLV|FURrIzQ z?V}n-NF&5FluQzGP7WIg{~ddH1QnI2&N)#>$4h=Iq%(KD9KuGVf2MdkFD5fXZMB^P zUQX#_-ir2W8}7HHjTD$*^<9imWWN#9uL@j*+~_lM@wyXnCK^mJX)&gIhLw4=q+k#`_YN?$8fLXIS^4ustb=@4b=qyaSeq){z-l zELoEB4k|JOd13TJs6kwBPNHQyT{sW;TxRr9q7El6V~fRUyRUky6@`pf<_@%T-p(4uYyYsM=^ug|7DaT^&u?VcMYO0&#y}IGbGtOvjs<|QuM@nLoMKW#u=4ml{HO`xNEBkc^lj6>%EOlA@5by^>uS3J1iR3gqm6eWlMGAykKQb 
zZLsEwGrX0Rjo$3}wRJVM-nv%rx36x?@t$)wl&fn!<($^KtC1T^wBlFS*xb@obH=Hs zmMxs#)Kq_Q*#%{7^`SaiB&?oOfxPyIZd$M=Sa(&BNc;Ly_>rFF&NfbiYbiV}*L@XT zrwxHE~?wf`huese{4l&+yyFVRb^trRbKnA%xhZ?^e?7nxX z(S0aUzI~|swxPz#A#}ZV2;tEogn;?^^T7Pk6!#BOjjbu}7gCIOQry2xH9k*qf0<(Z zF4eu#Nszc64ACJ>3ZjpZqR&obsjWr(EKh6bBy6lzqkA+`y*=7}Lyqyt3GN$njNki+@TG5GM3E=C!Tz3;hT;18lPLD{lPFO~HWJ;O zO^INL?@h#Ao%@-=#v*qoijRB}-uQ~kxX0!GgUfir<^G4uc)?X*7%S4;kE9vDNOSK_ zHQL>v`_k>kj$)YY(Lu(~2f3dbWc+%td(&WJ52{EyEL}8RydWXl%h(GNfyg}mf&>N< zp>O%|L^t6D3BR7S>Nc5nsckJOr}C5SbBUoE_YyohPiWZD@?vz0{r$4PJMhyt;SlQI z{~Paf#5Jbp(Xofo*(R2S@kkX@+%8NF#FPTlNi3FNjTj4Ioks~2Hv%lC9i%|lV5&g0 z>4C_CEIbbObecf?5xmbqyO91t!kQz)32UwYkt>(Ur7Q#we&6dy&1iO@ry+fziWM9h~-{CmH`q}#7=jLAf}$q z7m!>jsvfe#aGhI$`on*To`N$fi_SJ4HBv-m5jdBEab*JEMPOShRQ;CXC^=KskMM`z zj&8;<6@y#Nt*uvh7S{hJnvt1!eW|BBKK=?o|8z)-@jNDp=SW)mOOMX7aE)Ry7+!48 z^ei9ed2|xbBhBzJzUZfN6dqIb@2a}k^F+PkOdS->j4risjhY>sve`CT)t1rwQdmqA zuPWgMvB)!bkgH=ShW1=Et>o6%G|QIGWy+VP;h`|dT8ax%%~k);#r~%AtFx~CFRMnVms{*L7s?tEMHXrc8TrxR4v#X;1V<@I)_9r z=O6MNGTN(&%q%j^?#VObdg}hD`x^07#tgeIiA@w#9Gf4=^d)xbuT$wu)Dnz-C65$W zLGg=CbI5UMY7^^9rGLmrPhsr;M$d=r&>M2PNOfx|CKnCC*4Ana*D<@eK6Z^Mh7QD8 zR0+JH=q6^dR|_59XCWs|Hw(W~o~woFGadr&rpFoRgY?T|Z_;B`nh4X9EBQSIl~3C6 zjCw&+WqJrFE2r=ptfcBUpY&d9ok5y>6J>wj_=(O#Yaf3eDrRV#m8c`6GU~C6Y81m8 zd#4VJo_a0h;!VLdI+d0W@WeSRm**)`yy^6si0;KwS1hh~^8f@TkfOwG@}SGX#vf>+ z{wSJykl<2kN=qOKTy>189_Q-^R?;#yTnCUE_UIkL@~*p$=o`>tdAxL~zKYGGVYrTJ zG{hpg+Byn1T1v%+psOYZbWMTHc zB6#(_l#tN_8y`mO#iB*b%vx4Vyl(b&FARs?LAgoT=cBrWYt{2@wVEW$?Nw{J9bBOs zm_pGOYmGQ)`sg1muCvjq!vCb}*f<`w0%D~tVS#^Vk{Rt6{h8%8lO9>nPgx4uuBC_- zD5}&9F&?bL(i?xYqmr>_tTbT3kRNTw zo7&ggb)y+QoZ1|i#LfWJ_)5R|5QXMMrR?D8+y){^z8+ULrXPTX5=am1A<>5+4VKkZ zx+&8&^WRehjJ1zP$G}$dVOoDJu9}vZx%G2avgJ~3T`C`J{h-|*qPdbKD0UbTS{2`B zG=M^nE=aDb2XaYUgGXib<%#cr~hnG2f%?9lzRd%)60PVG)R zN$v9S*P?hJMkzgU0{A!FGKLi{Q#;c!U^Gp*yA5z_ywx8>8nTj_95eRJb+^x0#;O z4QBT-o5^s!Tlr%TLPSVdpD3X92`}x4!O8+w4F0SZ9?-qhA_Z0;#CpUcy&j=qENNhw zB1gfQv1N)Kuzgu%o?Mx@6*n}4kc$@gKzbn3AeSk|3p%w-@e*RnB4U|>cGF)8;ZTYe 
z9I}PwXfXo2EEZx?T9l{ODLkEA}j<<^NEr`_*a*9Q`o77mg;J$h4ea`>pVT($Jr zicUzZPGm(3MDiHiMw9*mRyR?2IwaHwG%52jOQ5l8c)U+6qWuMgX5_d4cG>riKz+kT zjZE=z6I21bG7;WV7P%hY+2}1R{HiScWtnHn-)J5`Ts{id-4G^k$7B0CwKVO4oc*Y( z*BvezwM8fqypXZ|`k#08EVviUv#>V{)c2Q#_fG8G*EWZkohg`MGZ}V{r^$vOfaMk_ zFe?A$uAYF${5at8KZ{Iad2cT!(9dGVsXnAqmb1sdn1n1^GzS-?z_m5S;r+dv)p}U> zeC+W;O%D_{XNB@(ZjAkXPysbN+lLE^v;%?2Y_#Hy7&m!q~q-VE=+9tfD2`4Rj<^gc@B^*8lC zx$sU9n&H>Y!dY1%^f+(FMPbuLq!#AHi}7%q!oT@FQ~rz&C3F});}(?^njBM>^Edxu z_;24L&&?l0f82;%3@HxphAv^+f|K9l@3>jRbs$hr{xClVa{^bv%9;9ramnS@>^gnlW64lWMw?CM$A zK>3E0{2rEhwZEZomN&FBDWA>AXEXA-xvcPgh4tk5t{ZEUKZZs79fvj+K7= zHQYTeO)Yp$MAprYYqAW#r{ql>ndn;qfFg^~SM`Oal||-*_;t~27QUtPGF0!Mp_=W_ zd5i4b?l0VO-I3;6G9!*Y++M%7IJ_5mdNdIJsw}c76CB$fHVeo2Je~Jq`iS?eyk?{! z+brxgQ97O}e-A5gfq>SJVKI{ULGAb2){xqvDS=2E zina|rKS5PuJYy;!9mx5_T#OAcJk(~4%AhCq1|jFz@v{XFHBEXT5iXgCXI3WC8(Z+`N-2QlIdV~t*biwIj_|2( z3e|(>GpPJ9@=$zB;p6xRTZ%ER>6nS+gG-`r}VTK9C)uuWqem@w| z)R7d&Ryrs?IZa$aaXirziiDv%!sXZQf;+0qx*5OqnzFj zBlc**1L(r=(HL{0T7IG7>~r)Q|3sRXit#MH^f6TTCErFeWDz=|ZIRoCQEMpB)?Yam zLP8I=?q?ywk0uvYNp5798_K^SvD>D!lHSCnA{rAD8wr%dwS@VQ<@ixkvL5uBc+0O0lmj8 z1j}4`a+XQH9X;3}DtD_<4iZSzUaO9tS|Zs?YN$+ddz;Z=+H#!v0)hN!)y()ut1d^`(-#G5Gc9Y2cQcd&q5ON0LO3HLAl~AU(lKbwJLZ^5QXXt3B=x2(FUfxo z8&IheL01^>k2dP@4)q~jYedP6@I_~#E0X;pJ&qNx0?V4*4Fg8UpocZ_u0SnSXEF=h z|0t|PJ2h#E0=boi9rDe@*iYypK43vxaiCu>Jxs8p85;Sy70-l(LUk|AH zt{GA0j+A(#O^gpsjbcwH(8tFX<$nbtJXXrayU^K*Jr#u9NSRjq#f!Au@XOBOn68?`uQ_rWW$ zdJjQ4i|o9`4Z7c6w4NJ#Z$qx5qmX;LS&v0OGL)+CYJegVM8!MhNHx6oSTs6is#lMu zAZY@7ybdPnD65Ltjkj!sa6*>D=ut?CY>0r*Se`-`@D#}`a<;tvXqNi4UF=bxpg%8v z9PL+|(o)?JoZr-PMSi~AvQ}B$GOx8nkR<%G(vPRqc68(LxbE~|OG{HrWymI8Gqv)| zA75N_!GTrKNyT~W1iKA)Li+?dr<<{PcIlm{|Nq60zGPfs1J8cxkzJ z&mEte?;V>rc3l40`~q+GwBQ`CSshXlr;Nkn*%2yj%GJ}1lm(e7M-LsGz8n=X6ZkPI z4t=`_gypGg`c^On>Wr~$B*JO*-^|Edl3JIMj!UAi!a?D1c@p$;Mbt37gbDfzL9gYd zTwNXz+6=yvD2~=TQM`{3gp+V!tpBEE?9nUay!;VRAW zrp`SyBMV4TM*6u!ieuMEO!*j!d{e+H`54Jkx`5DY3~uit3}F=x!FN(d<}%{DMDV== 
zyr9XXjP#NrLXQ^&^}zKok#2?3V+|P|a&AtYl#z9lYf^@{J8e=%_A+--M&6P^MHvMh zgNrk&T(`S23V;-4WP<{fNC7>N6#FZUor>3HTpNDUxrlTw$?$fhrLK||0QI>;03*vH zVKzuTm9VqW8Rd6zOkpZ-@`v$?p8QeJqZG=NKah-_DqinIApfmZ7`+uI(!}lJ z7j~&k5F_N=rP3U!(-7ej*D`_wIM%87Y|QMm-Z#U49LnvyjO}U2j7_GIU4ZEaROB=zsM`j!q!y$BJdN#*LNawT86RLS3V2=^7c{FIOmr5WvRSIHom zBTLH?5UFFullEDfxn(y-PE;yIsv5+YH3~FAepFLN*u*o=C*~p6dMLiiY|C z*-l?9+7!BV3fMjle*cJ#$2grU)~j&TFkHroiGaFuiQXqU%4K9HgFsy33;fAZF5^4N zpr98+#iTfw_*#Dw4F0+|PKomCnZI=4AHaO;HM{H1h~X61|&zh z3~JRA168e5hKoK=k+{UK3?$OiolE@cfE`|x3<~9Po>h@C%flOGRat^;#3TjPL2o*m<3xkK>|_BlbOE2YnZR-+~glj7wta z`Gw1v#cyNRF5|M;HNS8fRs1$~?J}xk*Zjg|)bQKbwab_jyXF@zV=ljqUAv5Vv1@+e zGV1tk?Am2q9=qljE~B2`#;#pP!VH1o^eYz0!DU>T3<7Z(tqypb13upYU+92eqhhHgr>W6qp74>g|;pHH%2Mc_BQV%m! z;K}XDFhy@|DMu>Yn&Mc6)3}z0(F%{(7#Vw#z$b_?0jEDj(NEo?)n>N8EUgS)UaG__g3oi5wcEN=j zU>96+q?HuqGLrL4L@pyazYvK@%XWjNV^MzhJmWeiOQfw+vr9PkVW z9N!AG2`;gcksRSNh9!eQTyg~=Db5vJ1xbu@#a3ag_z{VKx^u-w35n4zV`L%_qo3O~bpBjOm8q1K@f4;zV?&Z205#0=YRBF8c)ir!Ia67qc9dPLK9HR9<5x5=yJO}(7;CB406~0Ev6KjOH?gT#CI9+QY)(7ck zwZIEwxYqM!2mA|xpB8w|S?X#Fa67xDqKUV|Pj$d20v|1Y8BQ|R{I3)E^` zd_3$`@qclk?^C$$;%Tjt!m;Qw?ecX7aN)PgFq(d*!rK*2>ys3IX8)nac zJn+#*d{!ye-J#DX4)iCWLmzF#XPdN6PT^YMcKWY(z>h-y?dWGZ;I}*AKXbqzcfg-@ zz*Dg{Z>P_2;8b4aYQd4#@F^^CpnuT;&&CwRj{g-7_(}(SwFCaR1AYqhwv$`vfZybR z?{>gxDrLt%i*TFi^|u}9#{wV4)*yTR30tY^K3;HTiM)VK2B0MGVYIPUa z`A-6`iQ%Z8BJ2XLWyKDa>=jT;c1ncohs{GcF zzoLMk@%4Dq-7li61yoXiuUZ9Ls#}94gPC(ncCO9SUmDhgCqwzkc#^jF$#{K>7sx5O$ht_TGRDl6-n z>Y#y<8so1Zmi%k zR`Qp;#*qOnwoTZYRXm~ODx%@rtTbj#XW3Gv$?SbZ$1=a3!2No5+gxYY)`_`RlbNQ4 zu}?N=%BdBh2{oaG%|Xqdjg9~bc&7r!c#$rj9K%__;YGMSRtB9?f`X*6goikWG%(E7AOJMhIc{* zzOorSNYVo`?&p9+*tNN-zFu6Au|(fZZ6MvW1Z$+}>1Gaw1;{L%xUjM^RNK-tAEiAH zpAQvHW%<+^1I=cAu#pUcv0hzMYbAQXV9bVS#bu9(7`whu)RF0Y$fD`x_erTyu5PeP zsRoEruukM4WISb13$ z(KKOheOqg-9E9<)B<4qDaxf_x0!c7%x531$42=gSH8sJKh6c0^4VJwE~=aUh*`e-cYg=*vNr))U&4E5ihBp+llnlyo)lsth3bdU)&D3B|M z8dxx;jt174iP}vY3TK4sFzT)gw&v$et*ft}F6wB>wDR*RDidaHBms6CuZtZuv9f7y 
zrThZ*+=dYR>uOTszqG;t#mXva@FS}oW(svzwgoGjn_5x*P8Tjv(;(-^`LQLC$~i$a ztPR1&5G_l@VOo|D{pNo&L)3<_4Ar88;kiOt#Me>87sKQ3Ps=bO*P=GY^Mx;uA2hp` z356U5jVld?{BZ=tSLx{#_de`^WOuCA|ctO?F`D1(Z;%4rp% z`LtCnKfgkJIi3DDf1||~6f!1;tbr}yiw3fLuE-9aKRdPdLg62bL0Nom~*kYrEY85KJ zVF4bv{Qk|gQ&N{reZ~{J_#7ZMB@(!*6?0mfDr?ag)nkYtsvt(x3$T2OhM~ghyGflQ zM%xzmPA0*s9>z;pYOkJMcU3-yHkc1%NL5LzhiDAvHc{`IEGxFwk}M-0Rbd_MQ)|Y> z(NL|m&Q_wIuj<*Vko;1sm|s|PY+1yT**U-{k7W4|5?^7zgd~&N&g50;HMg|{E3c|+ z3AI(zQxiPS6yaq2{~-?g_zi_|5*9lo50NOZwk1{47OJ z`mbX+e&a&~!hg$f`dK>)WQX-QH2=Ra{27K1$G$=e#HR;`rl&9WQy@MYaA>^T0l$Xf zs~G(QjQ_I?e}vKB%kYay5d=DI#G&N|8Q#P2s~FDxww>Xd82zmb=jHo4!#6W}`fUmd z6wS+#emjE#nU(5@*7I0KzlGts4ById+E++{_&BojGp`3*NlD}qaQQ~Hwf1J9-(luC+*tPb{NI*$qd)~)rh{B z;ioYEygj*z;mGKC1+HNN^{AGquV)!2!UhaTj#&FI*#Bk1knZn5r|HSzJkkS8{;rA$< z>|iqdK}P=y!=Ge0*MBp^xt?z@oa^}+!?`}LLm&)+?D;AVZO>s0=W>r{IF~zt;au+d z4Cit$Q8?Mb&*auHdTxg+8P4_j0mHdIKW8}CX9L5zJ})qQ2h-rgO7AUkupK82GV&cUJWKZenBJIrGEYm7d~@YfmM%5bj#Vh8+(4Cnd!1;cs1HZYv? zf6)Oy3HvK4kR3MT(D}vZB19mZ_d5ZFlYOS((Dauw`YjBneWMhJ&sH3oeu=_Q2FB~@ zEsX!UjQ+=r54ZpC8P4thXNGh8zsYdk-+jUGB~0#6?BAq7`oDog>+e-K>3;*m{fwTE z(}E1=oAe@iW7Alqv0Bh`jA(ue!4SK*{TZ%@W3oXT-3(`P)R=l(p2;k^Fd$?(50J`XaS>;DYH zx&E&)oa@=kaIVi64Cnd`R-5uDzg+II4CiuBXE>KTS>a?K-ri1S^jx278P4l_C&PJt zzlGsk|CJ8-8isQ}c~aq2?%dB`bHG1fIQLuH7fgZd#`zCXIPqVFL;L?IMt>i}r!sn8 ze`^{2Hb&pf@P`?`Na1AvQiewu&d0sCFua)2-_H2+aqlXI^Y(MC!YRKuFg~9;&<`FC zA_Ob_Oofv@pTnW;b`8US$M6Rk&gq|HIH&)B;cqfNUod<%!-pM?8wApa<0mrwEyQU( zPjSGe2YiMDeyIaq&2TQamf>7(lLH=NIOlVl!pT2*KX@mj z=l$SWBS45i_J1FTw)52tzm4HHGMv}P?F{GiI~mUD%Z>n31d{t64lTEa;hdiKp;I7w zj(0lXKXAaGU^v(FS%!1|yBN;tKVdkhKjuiGrdVeP;8N%71cj6SyBK~lqo2g^(-_X% zld~CK%;+aE{=7Z;=|~VEklg(^v_6k9oa>*LMbwi13!F9mB!+YP3lvWNR>tV78T~&Q zejUR(pA`=Hdkp`O@!8LC&Zp)m5Ft>0IsJTwbNb~Be-~+Ve(z`azZm``!#Vvc4F8hR zf2we@&v^|0g3*7)@FCyA4Fc)U`Jc>i&fiox@joAj*1wF=bN+n{=i{Pdjz(ex>Mx$a zq4}JtaMI^%hM&)HZqJ(>@CO;r`{j|xfGGm;=l$J$9Gd@OUfdv9?U~~hu)x>jQqxahIJZv` z!?}Hap>UG>TO69tv7>N0$h*GkhDP=lO~<`WcM=D+l`7Cx8fn 
z%IhyUwB2?yoVR-?`iNT6bG(S*yxn_@;k@1Z!MEdl=$&yb_dbSm{!cpKFFN3xPmJ?V zVf^1af%x!t!B;?;+HeY1 z{A&0-1K*l2MYhG1^>0;v&w>>ru2Wqf~PCH zoH|4@)OP4{$@>e2CgM{R{a`kqrSjUW=#RJHpDMZcTJW&KS6lG43V+OkzoGEoSn!Ps z-)O<7EBtv2e!kLky9L+Z#oS@RYZU#P7W{aHziYwi`$81nXY&iPPqVUn3Y!-Yey76E zu;5Dv5f{UlVZm$CCEm@(vBam(E%Ey-_{oaT+ZLR@^F!ft3!bC+x3Kz7{5uu>)fW6Y zg)g$;=}MpPTkuSU-)zC@dp;Cyx8S!c{HGTDw+jEc1>d9ahb*|(=TQs(fueuPf^SrI z+hD;DQRVfV1=n(4wBWy1^siX(Unsl1Wx@ZT^m*TczpVItWWiHZdF`{{A1eB=P-p^^E-~w^Zb67;hfJD#)s#Ze1L*AU&NCF;XGeI5vas@zJ8`~YrdSuMH?7B z*OSJ;6o?PUcL_8Z|H1*MUxA`v<&*1xpXPv1cfgw*@Wl>z*a2VVfQw&Ov(f(#4)lDV zA-CJ74)nvRU~TxH?0}DTz~%Q=lkI%D1O2rQc&7vYBM1B`2YkB&zQY0EUX2!|o?KqK-=TKqr;NVDf&L?8a=@Q-!2eVEH68e0qHt?|uXLdAaKNc=rC{}|RT56ZY3+`JmHs&i zC*j-<_ku>3i`RkvB!yGClMb4`fYIN_@c(K4MNBSl$GF@FnLdXpdeWczGOfRwjtKj# zX85U!p6toTIj1w6k2l;5=g;5OV>i)1(pW;v9i`~WPbM=wo8iR_KSklB&+QC9l;PYC zdi+E5Tu(J^68XKK@$oA@WS{j6pQUi(vyI`64CivMV)!GB{(6RUee`^t_;WsYD0U~C(D*9^A`t!p4voJ>AOi9EJr0e3 zNFV~?T<(5>Cd=(pxK-{T)&E-M>h%w++~Zh({UUg3efYSA@~H8%89lf2xeVv@7c!jZ ztD525p0x_M=1Y%%tohRGN!EPvaS@lh%t7w04sw6Ya9)l-V|;iy{*vL`4tm{)^yGfK z&4K<^hI9SjWjME6RN+>;?PWOEU;aKovLCvrAweKLx&C^6&Z_?rjGpW1WjNRKB!ye` z%w;&&Q?IL8%XbRn&-FZy;g28>+HWsqIB&0(Gr7FIx}VYW_UZ|SbN_#y;k+LGk>Ok) zz20WEXD_4Y{6BWUzh*eM{~@XaBmUg}nF_bses)6)<2QXu_T;n3yV&iM28{APx8`}~;U+&=d({>vDD{eBS1<@V9* znpXStF#g;=FEO0!^E$)1KJPf_vzy^uAH6TJa5xKd0C0zE=KkIMDB6IJeKo4CnUwLg7~X=%386`ng`;rDWVb{5`V! 
z5vS|pEovZ3^t}Dl>(GSr{H|s6pD{jqz1B*HG}cta)S)#{@lRu zCE%y|w=taCt)1bVkAALVweu~Ep6B-`4Cnb>rEsgAS2LX3`7y?a=XX8Bxt=M5h1o>8 zbN|%OAFTQx$>{HA_PLtjJYV{`B=O<-TBhi&`MQO>^LlzxIsgPK{Wykm`<%mYZl4B*bNU4g=k%MTSRwZvTx-Am z1H-xge`omb82zDaT>UP?&rvuHvo2)#WJdp2hVyyldkiO^q+qqrs}fGa_c`ESJK*$d z^c1Z8oz|IkzO3|@I`HB1EN;(54)pprfUW!=ccA~H1OA2s?zGN)I2#XhJ&$(4^BnMU z2VDPNp*3Gk4)ixW;I}*A4?Ey5I^eH3;2%2Rv!$B|KZ)X6*O$i}@WYw?sqSd{?>W%x z=TKxH&R?%f5YGAJF#eqWZU_7$hJV2Hk-s}9{h#wOnH_dB`g&Ybu z`N+4mJ=ZY&Ifnn4;lE?Ji;WXF{fP|c^m7@`={p(D>3_lSB}}f~-$D9w{4GY$>!be8 z09B6?#^)$De)tW;y$ZL|f7^k+*n!^fKtEsMR4%+6BaEJx<9m#rm*aj$zm4fX{1DtA zSnYFy1O6R`^YMHh!#@E(?I#5c=k`CF;h!@4YKHUiVI9M{+(w3T{#^{epYgw$;WS3l zdfv`(&Y$c;fy$SzHT_Tm5eQ2~Tdi@u&xFd0zSX1gi3|BC1Ww!mb zZNk&>FH;e!;uZ9|uhcx~d?5NmDAE!L8tTA*2&32X38LR>wg!Tn_~>T}l!o&eJU~8r z>}uuX86cnGjGpVG$M(dZ+nMXh>Az2eDyY}3g#DLl5NFPZ^XGg89nE-o#9?}O{)Bj;n@sd$?!ae-@|ZyNqlbAH#mFMHMIk7bxkL|AB^xp3^&y^ z;i}6L$Q*|2yc4v5;i^d!2YDIF8LppW5VmKg z4dSfprlwz`;>M6+5D52(EBS8%!-fAzeQ=yjP=ilBvF+u{y}47vWemPSvk@F zp5a8VdA-MQy3{_s--3&V2=OT{{6nB<;-mLL>U}|kAB{tY2^M;viU=^A(zGl4&R{t4 zAEW5+VmR^Fw!4qvM88*pZEn-0A`^_+!X`z8HOECsEazH1r%NrHp=m)p9qAyhrv$i+8w z<+)a0wFK*{bLqpUMy~itgWTDzt%d+{<>yyze_01V7^9LAmGv8x2|s{CzdbPka{85x zL~{P?7vkIf2Z&BiZTpKix$|o9Lk;3bbShzw`k=@w7o>k1C%2`E%tD6C&&7``@mu@` zla-G}b*o`4ALkUm!IGGpgQZBe=6~rk-~Ij}vBdn@e@WD)SRDwH3z&T%IQ|&G;niGw>gJu{aHPL-BJV+UZc!l7H7F?wPzsCev{t_#ub7D}wb4W9}?I z)carhO#u50{P%vMAkkm{leO#r+Ak3N&t+i%zchHT-vRjFq2zz!+y03;v9#d-q0ja+ z8Lgped{DVrRcQJFpUUc%mTF#`lRsc>`4YBdCweyYI+w(tg3dY&~GujSJ2S9wVpE0zR zkd!D^V${i4ipBa9h5h4bXJ|X-2R5K>3J2?8U<)t*H-3pt`K#4A@uNu9^|=T75k>rR zXaoJWiILmb6bj~+o;TwZRAT(Ln>1ChwsLMuHGVoVcV0^qD)zq}9>5|1pGRwk6V-*D zcX#0a1{`Wy5j#7j*XtX4&Q0kr8bJC>K}&(BpNaea%GajRYYHU)rU9gH2Q39IpPp^@ zSN;l-IzIm<`4GgUFw;$=$-~7|sI0c@bp6m8Ey?$?yr~hyO>3vFmIw^rb3=wY^e@;e$6|`3Q zp8&IypP4JACOjJeWR?FBFgy9}HuA}rtn#T(A^E8m|0z)Nharv!lEuTBD*adv;@Yp$ z)0mqFl0`wE&vi)ufJ*>NO$twRR4)WJNB~$EB2}yn- 
z4y*h>ImmAxFB9s0g^Ep*|GnfOfBDlgMU}EY$v+c^RsIbqIJ^Ak70C3PAe)q{3!|Fdo6uXd0xj0O7N$0W-Ot9*JN(N6wO+9ZZRdxaTU#r|96zvLi)_cJm{l}btSi*Q)wAA|C-vw!9pGGU2IuTNI_^nRjU{>>hlVu4CX z@+af4%AeyPze>s1^omWA|5ZB3@9B{#bUh>alo!oU~Q+0mXg^_Yx&=&^|z$2-7XU>SLsOq^Kn@9AM3W4?^PAg?NSWX z$twR8q!;$H$ltHzuT(h6zraR5?SHV#f1W7?Rvje&dmZH8@kg0nu@UWGg^m0l53-lP z+%MD5v*gQ~{~sc~UH*3}`8|@2kUzsl{v2%nvdh14HqdY5o4s&vm0yYUcJf!!z!G6M z>_dUmq%6Wo@)Y}TmA_HTk5R~rrLW5~ufMc@YL!pl#iS}`aWiAT zlCS9{w|IJuzv3WY{MZr_>3T}}zXXR)r}14#PxiNtKgCO3z>mklgEjvn!I0AH?;~hh z?}hTpO0VT>y5Ruq$}j5;Ns*@rN&m}m==56uQYGK2f1Z**Ql-}?t3FLCy;Xm+pZZ_! zpucIOf3=PNw>#)RQ_1&e#nl<)34wl3&(8j}D!sOoPGn90D*$%(U!e3iRYJ1=92`2m z=KCrDJO61{@<(gI>g=`LABy^Ir~d=})ZdNtcKY|&=syp#t@_h`eLMYol>B2X`da}${(d|CMPmp0gZck64*F+NV~L>4S>DC%r|+M3(7)V9 z{{|$o+W!Lw`FTpdR|`^SonFJc9OU0&BfklWbb6hyQP>=8=YQo&et{O`ApaPoxAVW9 zN`93}NA>4Q99H?~ImlnC=wCn~nU{4)VA6Q~v!9@^{!-Ms{6zK`G4ATsq9>cSQU`;<=rMJq@Q}VUHXeQSC^O4@p|9e#a zwV#pyU57)b*Lb^ Date: Sat, 20 Dec 2025 10:20:27 +0000 Subject: [PATCH 04/12] add middleman toolchain config files --- configfiles/middleman/DatabaseWorkersConfig | 8 +++ configfiles/middleman/DummyToolConfig | 3 + configfiles/middleman/JobManagerConfig | 4 ++ .../middleman/LoggingReceiveSenderConfig | 8 +++ configfiles/middleman/MonitoringConfig | 2 + .../middleman/MonitoringReceiveSenderConfig | 8 +++ configfiles/middleman/MulticastWorkersConfig | 1 + configfiles/middleman/README.md | 25 ++++++++ .../ReadQueryReceiverReplySenderConfig | 10 ++++ configfiles/middleman/ResultWorkersConfig | 2 + configfiles/middleman/SocketManagerConfig | 2 + configfiles/middleman/ToolChainConfig | 58 +++++++++++++++++++ configfiles/middleman/ToolsConfig | 12 ++++ .../middleman/WriteQueryReceiverConfig | 9 +++ configfiles/middleman/WriteWorkersConfig | 2 + 15 files changed, 154 insertions(+) create mode 100644 configfiles/middleman/DatabaseWorkersConfig create mode 100644 configfiles/middleman/DummyToolConfig create mode 100644 configfiles/middleman/JobManagerConfig create mode 100644 
configfiles/middleman/LoggingReceiveSenderConfig create mode 100644 configfiles/middleman/MonitoringConfig create mode 100644 configfiles/middleman/MonitoringReceiveSenderConfig create mode 100644 configfiles/middleman/MulticastWorkersConfig create mode 100644 configfiles/middleman/README.md create mode 100644 configfiles/middleman/ReadQueryReceiverReplySenderConfig create mode 100644 configfiles/middleman/ResultWorkersConfig create mode 100644 configfiles/middleman/SocketManagerConfig create mode 100644 configfiles/middleman/ToolChainConfig create mode 100644 configfiles/middleman/ToolsConfig create mode 100644 configfiles/middleman/WriteQueryReceiverConfig create mode 100644 configfiles/middleman/WriteWorkersConfig diff --git a/configfiles/middleman/DatabaseWorkersConfig b/configfiles/middleman/DatabaseWorkersConfig new file mode 100644 index 0000000..3a1abaf --- /dev/null +++ b/configfiles/middleman/DatabaseWorkersConfig @@ -0,0 +1,8 @@ +verbose 2 +hostname /tmp +#hostaddr 127.0.0.1 +dbname daq +port 5432 +user root # fall back to PGUSER env var if not defined? FIXME better user? 
+#passwd +max_workers 10 diff --git a/configfiles/middleman/DummyToolConfig b/configfiles/middleman/DummyToolConfig new file mode 100644 index 0000000..95cad88 --- /dev/null +++ b/configfiles/middleman/DummyToolConfig @@ -0,0 +1,3 @@ +# Dummy config file + +verbose 2 \ No newline at end of file diff --git a/configfiles/middleman/JobManagerConfig b/configfiles/middleman/JobManagerConfig new file mode 100644 index 0000000..4166aac --- /dev/null +++ b/configfiles/middleman/JobManagerConfig @@ -0,0 +1,4 @@ +verbose 2 +#thread_cap 100 # default: 80% of detected CPUs +#global_thread_cap # default: == thread_cap +self_serving 1 diff --git a/configfiles/middleman/LoggingReceiveSenderConfig b/configfiles/middleman/LoggingReceiveSenderConfig new file mode 100644 index 0000000..5cace3a --- /dev/null +++ b/configfiles/middleman/LoggingReceiveSenderConfig @@ -0,0 +1,8 @@ +verbose 2 +type logging +multicast_address 239.192.1.2 +port 5000 +local_buffer_size 200 +transfer_period_ms 100 +poll_timeout_ms 20 + diff --git a/configfiles/middleman/MonitoringConfig b/configfiles/middleman/MonitoringConfig new file mode 100644 index 0000000..1c1264c --- /dev/null +++ b/configfiles/middleman/MonitoringConfig @@ -0,0 +1,2 @@ +verbose 2 +monitoring_period_ms 60000 diff --git a/configfiles/middleman/MonitoringReceiveSenderConfig b/configfiles/middleman/MonitoringReceiveSenderConfig new file mode 100644 index 0000000..54e9459 --- /dev/null +++ b/configfiles/middleman/MonitoringReceiveSenderConfig @@ -0,0 +1,8 @@ +verbose 2 +type monitoring +multicast_address 239.192.1.3 +port 5000 +local_buffer_size 200 +transfer_period_ms 100 +poll_timeout_ms 20 + diff --git a/configfiles/middleman/MulticastWorkersConfig b/configfiles/middleman/MulticastWorkersConfig new file mode 100644 index 0000000..384c877 --- /dev/null +++ b/configfiles/middleman/MulticastWorkersConfig @@ -0,0 +1 @@ +verbose 2 diff --git a/configfiles/middleman/README.md b/configfiles/middleman/README.md new file mode 100644 index 
0000000..5afa52c --- /dev/null +++ b/configfiles/middleman/README.md @@ -0,0 +1,25 @@ +# Configure files + +*********************** +#Description +********************** + +Configure files are simple text files for passing variables to the Tools. + +Text files are read by the Store class (src/Store) and automatically asigned to an internal map for the relavent Tool to use. + + +************************ +#Useage +************************ + +Any line starting with a "#" will be ignored by the Store, as will blank lines. + +Variables should be stored one per line as follows: + + +Name Value #Comments + + +Note: Only one value is permitted per name and they are stored in a string stream and templated cast back to the type given. + diff --git a/configfiles/middleman/ReadQueryReceiverReplySenderConfig b/configfiles/middleman/ReadQueryReceiverReplySenderConfig new file mode 100644 index 0000000..5e674e5 --- /dev/null +++ b/configfiles/middleman/ReadQueryReceiverReplySenderConfig @@ -0,0 +1,10 @@ +verbose 2 +port_name db_read +rcv_hwm 10000 +conns_backlog 30000 +poll_timeout_ms 10 +snd_timeout_ms 10 +rcv_timeout_ms 10 +local_buffer_size 200 +transfer_period_ms 200 + diff --git a/configfiles/middleman/ResultWorkersConfig b/configfiles/middleman/ResultWorkersConfig new file mode 100644 index 0000000..caf332a --- /dev/null +++ b/configfiles/middleman/ResultWorkersConfig @@ -0,0 +1,2 @@ +verbose 2 + diff --git a/configfiles/middleman/SocketManagerConfig b/configfiles/middleman/SocketManagerConfig new file mode 100644 index 0000000..f9014b8 --- /dev/null +++ b/configfiles/middleman/SocketManagerConfig @@ -0,0 +1,2 @@ +verbose 2 +update_ms 2000 diff --git a/configfiles/middleman/ToolChainConfig b/configfiles/middleman/ToolChainConfig new file mode 100644 index 0000000..8beca02 --- /dev/null +++ b/configfiles/middleman/ToolChainConfig @@ -0,0 +1,58 @@ +#ToolChain dynamic setup file + +##### Runtime Paramiters ##### +UUID_path ./UUID # UUID_path for fixed UUID, if doesnt exist 
file will be generated. Remove option for always random +verbose 2 # Verbosity level of ToolChain +error_level 0 # 0= do not exit, 1= exit on unhandeled errors only, 2= exit on unhandeled errors and handeled errors +attempt_recover 1 # 1= will attempt to finalise if an execute fails, 0= will not +remote_port 24002 # port to open for remote commands if running in remote mode +IO_Threads 1 # Number of threads for network traffic (~ 1/Gbps) +alerts_send 1 # enable ability to send global alerts +alert_send_port 12242 # port to send global alerts +alerts_receive 1 # enable ability to receive global alerts +alert_receive_port 12243 # port to receive global alerts +sc_port 60000 # port for slow control + +###### Logging ##### +log_interactive 1 # Interactive=cout; 0=false, 1= true +log_local 0 # Local = local file log; 0=false, 1= true +log_local_path ./log # file to store logs to if local is active +log_remote 0 # Remote= remote logging system "serservice_name Remote_Logging"; 0=false, 1= true +log_address 239.192.1.1 # Remote multicast address to send logs +log_port 5001 # port on remote machine to connect to +log_append_time 0 # append seconds since epoch to filename; 0=false, 1= true +log_split_files 0 # seperate output and error log files (named x.o and x.e) + +###### Service discovery ##### Ignore these settings for local analysis +service_discovery_address 239.192.1.1 # multicast address to use for service discovery +service_discovery_port 5000 # port to use for service discovery +service_name ToolDAQ_Service # name of Toolchain service to braodcast +service_publish_sec 5 # heartbeat send period +service_kick_sec 60 # remove hosts with no heartbeat after given period + +###### Backend Services ##### NEWLY ADDED NEEDS TO HAVE PROPER DESCRIPTIONS AND SOME PRUNING BEFORE RELEASE +use_backend_services 1 # +db_name daq # +verbosity 1 # +max_retries 3 # +advertise_endpoints 0 # +resend_period_ms 1000 # +print_stats_period_ms 1000 # +clt_pub_port 55556 # +clt_dlr_port 
55555 # +clt_pub_socket_timeout 500 # +clt_dlr_socket_timeout 500 # +inpoll_timeout 50 # keep these short! +outpoll_timeout 50 # keep these short! +command_timeout 2000 # +multicast_port 55554 # +multicast_address 239.192.1.1 # + +##### Tools To Add ##### +Tools_File configfiles/middleman/ToolsConfig # list of tools to run and their config files + +##### Run Type ##### +Inline 0 # number of Execute steps in program, -1 infinite loop that is ended by user +Interactive 0 # set to 1 if you want to run the code interactively +Remote 1 # set to 1 if you want to run the code remotely + diff --git a/configfiles/middleman/ToolsConfig b/configfiles/middleman/ToolsConfig new file mode 100644 index 0000000..5afb0ac --- /dev/null +++ b/configfiles/middleman/ToolsConfig @@ -0,0 +1,12 @@ +loggingReceiver MulticastReceiverSender configfiles/middleman/LoggingReceiveSenderConfig +monitoringReceiver MulticastReceiverSender configfiles/middleman/MonitoringReceiveSenderConfig +writeReceiver WriteQueryReceiver configfiles/middleman/WriteQueryReceiverConfig +readReceiver ReadQueryReceiverReplySender configfiles/middleman/ReadQueryReceiverReplySenderConfig +multicastWorkers MulticastWorkers configfiles/middleman/MulticastWorkersConfig +writeWorkers WriteWorkers configfiles/middleman/WriteWorkersConfig +databaseWorkers DatabaseWorkers configfiles/middleman/DatabaseWorkersConfig +resultWorkers ResultWorkers configfiles/middleman/ResultWorkersConfig +jobManager JobManager configfiles/middleman/JobManagerConfig +socketManager SocketManager configfiles/middleman/SocketManagerConfig +monitoring Monitoring configfiles/middleman/MonitoringConfig + diff --git a/configfiles/middleman/WriteQueryReceiverConfig b/configfiles/middleman/WriteQueryReceiverConfig new file mode 100644 index 0000000..64a812d --- /dev/null +++ b/configfiles/middleman/WriteQueryReceiverConfig @@ -0,0 +1,9 @@ +verbose 2 +port_name db_write +rcv_hwm 10000 +conns_backlog 30000 +rcv_timeout_ms 10 +poll_timeout_ms 10 
+transfer_period_ms 200 +local_buffer_size 200 +#am_master 1 diff --git a/configfiles/middleman/WriteWorkersConfig b/configfiles/middleman/WriteWorkersConfig new file mode 100644 index 0000000..caf332a --- /dev/null +++ b/configfiles/middleman/WriteWorkersConfig @@ -0,0 +1,2 @@ +verbose 2 + From 4da215841f471c6ce50a43eb26506c1eb592ed45 Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Sat, 3 Jan 2026 16:06:44 +0000 Subject: [PATCH 05/12] numerous updates, mostly working --- .gitignore | 11 +- DataModel/DataModel.h | 1 + DataModel/ManagedSocket.h | 4 +- DataModel/MonitoringVariables.h | 27 ++ DataModel/QueryBatch.h | 55 ++- DataModel/ZmqQuery.h | 17 +- DataModel/type_name_as_string.cpp | 16 + DataModel/type_name_as_string.h | 47 +++ .../DatabaseWorkerMonitoring.h | 8 +- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 345 ++++++++++++------ UserTools/DatabaseWorkers/DatabaseWorkers.h | 3 + UserTools/Monitoring/Monitoring.cpp | 7 +- .../MulticastReceiverSender.cpp | 49 ++- .../MulticastWorkers/MulticastWorkers.cpp | 16 +- .../ReadQueryReceiverReplySender.cpp | 69 ++-- .../ReadQueryReceiverReplySender.h | 2 +- UserTools/ResultWorkers/ResultWorkers.cpp | 55 ++- UserTools/SocketManager/SocketManager.cpp | 18 +- .../WriteQueryReceiver/WriteQueryReceiver.cpp | 86 ++--- .../WriteQueryReceiver/WriteQueryReceiver.h | 2 +- UserTools/WriteWorkers/WriteWorkers.cpp | 17 +- configfiles/Dummy/ToolChainConfig | 8 +- configfiles/middleman/ToolChainConfig | 20 +- .../middleman/WriteQueryReceiverConfig | 2 +- configfiles/template/ToolChainConfig | 8 +- 25 files changed, 618 insertions(+), 275 deletions(-) create mode 100644 DataModel/type_name_as_string.cpp create mode 100644 DataModel/type_name_as_string.h diff --git a/.gitignore b/.gitignore index 3ccff8f..059a607 100644 --- a/.gitignore +++ b/.gitignore @@ -19,11 +19,20 @@ *.pyc # data files *.root +# core dumps +core +# ToolFramework log files +log.e +log.o +# ToolFramework UUIDs +UUID # the dependencies symlink Dependencies 
-# the main executable +# executables main +RemoteControl +NodeDaemon # lib folder is just build products lib/* # the include folder is actually automatically populated diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index 9ca8a5d..68c7d9f 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -11,6 +11,7 @@ #include "QueryBatch.h" #include "ManagedSocket.h" #include "query_topics.h" +#include "type_name_as_string.h" // mostly for debug class MonitoringVariables; /** diff --git a/DataModel/ManagedSocket.h b/DataModel/ManagedSocket.h index d838c44..9c36b29 100644 --- a/DataModel/ManagedSocket.h +++ b/DataModel/ManagedSocket.h @@ -9,8 +9,8 @@ struct ManagedSocket { std::mutex socket_mtx; zmq::socket_t* socket=nullptr; std::string service_name; - std::string port; - std::string port_name; + std::string remote_port; + std::string remote_port_name; std::map connections; }; diff --git a/DataModel/MonitoringVariables.h b/DataModel/MonitoringVariables.h index 14bc0d7..4f401b0 100644 --- a/DataModel/MonitoringVariables.h +++ b/DataModel/MonitoringVariables.h @@ -1,11 +1,38 @@ #ifndef MonitoringVariables_H #define MonitoringVariables_H +#include +#include class MonitoringVariables { public: MonitoringVariables(){}; virtual ~MonitoringVariables(){}; virtual std::string toJSON()=0; + ToolFramework::Store vars; + std::mutex mtx; + void Clear(){ + std::unique_lock locker(mtx); + vars.Delete(); + return; + } + + template + void Set(const std::string& key, T val){ + std::unique_lock locker(mtx); + vars.Set(key, val); + return; + } + + std::string GetJson(){ + std::unique_lock locker(mtx); + std::string ret; + vars >> ret; + ret.pop_back(); // remove trailing '}' + std::string ret2 = toJSON(); + ret2[0]=','; // replace leading '{' with ',' to concatenate the two + return ret+ret2; + } + }; #endif diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h index ffcce9a..962f468 100644 --- a/DataModel/QueryBatch.h +++ b/DataModel/QueryBatch.h @@ -7,6 +7,10 @@ 
struct QueryBatch { + QueryBatch(size_t prealloc_size){ + queries.reserve(prealloc_size); + } + // fill / read by receive/senders std::vector queries; @@ -16,22 +20,24 @@ struct QueryBatch { std::string runconfig_buffer; std::string calibration_buffer; std::string plotlyplot_buffer; - std::string rooplot_buffer; + std::string rootplot_buffer; // flagged for can't be batch inserted by workers std::vector generic_write_query_indices; // set by database workers after batch insert - bool alarm_batch_success; std::vector devconfig_version_nums; std::vector runconfig_version_nums; std::vector calibration_version_nums; std::vector plotlyplot_version_nums; std::vector rootplot_version_nums; - QueryBatch(size_t prealloc_size){ - queries.reserve(prealloc_size); - } + std::string alarm_batch_err; + std::string devconfig_batch_err; + std::string runconfig_batch_err; + std::string calibration_batch_err; + std::string plotlyplot_batch_err; + std::string rootplot_batch_err; void reset(){ alarm_buffer = "["; @@ -39,11 +45,8 @@ struct QueryBatch { runconfig_buffer = "["; calibration_buffer = "["; plotlyplot_buffer = "["; - rooplot_buffer = "["; - - alarm_batch_success = false; + rootplot_buffer = "["; - // the presence of returned version numbers is indication that these batch insertions worked devconfig_version_nums.clear(); runconfig_version_nums.clear(); calibration_version_nums.clear(); @@ -51,8 +54,42 @@ struct QueryBatch { rootplot_version_nums.clear(); generic_write_query_indices.clear(); + alarm_batch_err.clear(); + devconfig_batch_err.clear(); + runconfig_batch_err.clear(); + calibration_batch_err.clear(); + plotlyplot_batch_err.clear(); + rootplot_batch_err.clear(); } + void close(){ + if(alarm_buffer.length()!=1) alarm_buffer += "]"; + else alarm_buffer.clear(); + + if(devconfig_buffer.length()!=1) devconfig_buffer += "]"; + else devconfig_buffer.clear(); + + if(runconfig_buffer.length()!=1) runconfig_buffer += "]"; + else runconfig_buffer.clear(); + + 
if(calibration_buffer.length()!=1) calibration_buffer += "]"; + else calibration_buffer.clear(); + + if(plotlyplot_buffer.length()!=1) plotlyplot_buffer += "]"; + else plotlyplot_buffer.clear(); + + if(rootplot_buffer.length()!=1) rootplot_buffer += "]"; + else rootplot_buffer.clear(); + } + + bool got_alarms() const { return !alarm_buffer.empty(); } + bool got_devconfigs() const { return !devconfig_buffer.empty(); } + bool got_runconfigs() const { return !runconfig_buffer.empty(); } + bool got_calibrations() const { return !calibration_buffer.empty(); } + bool got_plotlyplots() const { return !plotlyplot_buffer.empty(); } + bool got_rootplots() const { return !rootplot_buffer.empty(); } + bool got_generics() const { return !generic_write_query_indices.empty(); } + }; #endif diff --git a/DataModel/ZmqQuery.h b/DataModel/ZmqQuery.h index 81b3789..3b682ac 100644 --- a/DataModel/ZmqQuery.h +++ b/DataModel/ZmqQuery.h @@ -28,6 +28,7 @@ struct ZmqQuery { // pub socket: topic, client, msgnum, query // router socket: client, topic, msgnum, query // replies: client, msgnum, success, results (if present)... + // if success is false, results are 1-part with an error message zmq::message_t& operator[](int i){ return parts[i]; @@ -51,21 +52,29 @@ struct ZmqQuery { void setsuccess(uint32_t succeeded){ //zmq_msg_init_size(&parts[2],sizeof(uint32_t)); // this is from underlying c api... mismatch zmq_msg_t* / zmq::message_t new(&parts[2]) zmq::message_t(sizeof(uint32_t)); // FIXME is there a better way to call zmq_msg_init_size? - memcpy((void*)parts[2].data(),&succeeded,sizeof(uint32_t)); + memcpy((void*)parts[2].data(),&succeeded,sizeof(uint32_t)); // FIXME make bool instead of uint32_t? 
return; } // for read queries, returned directly from pqxx, decoded later pqxx::result result; + std::string err; + + void Clear(){ + result.clear(); + err.clear(); + } // for setting responses of read queries void setresponserows(size_t n_rows){ + printf("ZmqQuery at %p set to %lu response rows\n",this, n_rows); parts.resize(3+n_rows); return; } void setresponse(size_t row_num, std::string_view val){ //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t + printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, val.data(), this); new(&parts[row_num+3]) zmq::message_t(val.size()); // FIXME better way to call zmq_msg_init_size memcpy((void*)parts[row_num+3].data(),val.data(),val.size()); return; @@ -75,6 +84,12 @@ struct ZmqQuery { typename std::enable_if::value, void>::type setresponse(size_t row_num, T val){ //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t + + // what a mess. but only printf bypasses our great overlord's wonderful logging decorations + std::ostringstream oss; + oss << val; + printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, oss.str().c_str(), this); + new(&parts[row_num+3]) zmq::message_t(sizeof(val)); // FIXME better way to call zmq_msg_init_size memcpy((void*)parts[row_num+3].data(),&val,sizeof(val)); return; diff --git a/DataModel/type_name_as_string.cpp b/DataModel/type_name_as_string.cpp new file mode 100644 index 0000000..25ee8af --- /dev/null +++ b/DataModel/type_name_as_string.cpp @@ -0,0 +1,16 @@ +#include "type_name_as_string.h" + +std::string current_exception_name(){ + std::unique_ptr own + ( +#ifndef _MSC_VER + abi::__cxa_demangle(abi::__cxa_current_exception_type()->name(), nullptr, + nullptr, nullptr), +#else + nullptr, +#endif + std::free + ); + std::string r = own != nullptr ? 
own.get() : abi::__cxa_current_exception_type()->name(); + return r; +} diff --git a/DataModel/type_name_as_string.h b/DataModel/type_name_as_string.h new file mode 100644 index 0000000..0672f83 --- /dev/null +++ b/DataModel/type_name_as_string.h @@ -0,0 +1,47 @@ +#ifndef TypeNameAsString_h +#define TypeNameAsString_h +/* usage: +auto returnedcovmatrix = htrackfitresult->GetCovarianceMatrix(); +std::cout << type_name() << endl; +*/ + +#include +#include +#ifndef _MSC_VER +# include +#endif +#include +#include +#include + +template +std::string +type_name() +{ + typedef typename std::remove_reference::type TR; + std::unique_ptr own + ( +#ifndef _MSC_VER + abi::__cxa_demangle(typeid(TR).name(), nullptr, + nullptr, nullptr), +#else + nullptr, +#endif + std::free + ); + std::string r = own != nullptr ? own.get() : typeid(TR).name(); + if (std::is_const::value) + r += " const"; + if (std::is_volatile::value) + r += " volatile"; + if (std::is_lvalue_reference::value) + r += "&"; + else if (std::is_rvalue_reference::value) + r += "&&"; + return r; +} + +std::string current_exception_name(); + +#endif // define TypeNameAsString_h + diff --git a/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h b/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h index 4e589a1..c6d139f 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h +++ b/UserTools/DatabaseWorkers/DatabaseWorkerMonitoring.h @@ -24,8 +24,8 @@ class DatabaseWorkerMonitoring : public MonitoringVariables { std::atomic runconfig_submissions_failed; std::atomic calibration_submissions; std::atomic calibration_submissions_failed; - std::atomic genericwrite_submissions; - std::atomic genericwrite_submissions_failed; + std::atomic generic_submissions; + std::atomic generic_submissions_failed; std::atomic readquery_submissions; std::atomic readquery_submissions_failed; std::atomic jobs_completed; @@ -50,8 +50,8 @@ class DatabaseWorkerMonitoring : public MonitoringVariables { 
+",\"runconfig_submissions_failed\":"+std::to_string(runconfig_submissions_failed.load()) +",\"calibration_submissions\":"+std::to_string(calibration_submissions.load()) +",\"calibration_submissions_failed\":"+std::to_string(calibration_submissions_failed.load()) - +",\"genericwrite_submissions\":"+std::to_string(genericwrite_submissions.load()) - +",\"genericwrite_submissions_failed\":"+std::to_string(genericwrite_submissions_failed.load()) + +",\"generic_submissions\":"+std::to_string(generic_submissions.load()) + +",\"generic_submissions_failed\":"+std::to_string(generic_submissions_failed.load()) +",\"readquery_submissions\":"+std::to_string(readquery_submissions.load()) +",\"readquery_submissions_failed\":"+std::to_string(readquery_submissions_failed.load()) +",\"jobs_failed\":"+std::to_string(jobs_failed.load()) diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index b324d83..363d9fe 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -60,7 +60,10 @@ bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; thread_args.job_queue = &database_jobqueue; - m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args); + if(!m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; /* ----------------------------------------- */ @@ -97,7 +100,7 @@ bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ return false; } catch (std::exception const &e){ - std::cerr << e.what() << std::endl; // FIXME cerr -> Log + std::cerr << current_exception_name()<<": "< Log return false; } @@ -185,6 +188,7 @@ void DatabaseWorkers::Thread(Thread_args* args){ std::unique_lock 
locker(m_args->m_data->log_query_queue_mtx); if(!m_args->m_data->log_query_queue.empty()){ std::swap(m_args->m_data->log_query_queue, job_data->logging_queue); + printf("DbJobDistributor grabbed %d log batches\n",job_data->logging_queue.size()); } // grab monitoring queries @@ -203,18 +207,20 @@ void DatabaseWorkers::Thread(Thread_args* args){ locker = std::unique_lock(m_args->m_data->plotlyplot_query_queue_mtx); if(!m_args->m_data->plotlyplot_query_queue.empty()){ std::swap(m_args->m_data->plotlyplot_query_queue, job_data->plotlyplot_queue); + } // grab write queries locker = std::unique_lock(m_args->m_data->write_query_queue_mtx); if(!m_args->m_data->write_query_queue.empty()){ std::swap(m_args->m_data->write_query_queue, job_data->write_queue); + printf("DbJobDistributor grabbed %d write query batches\n",job_data->write_queue.size()); } // grab read queries locker = std::unique_lock(m_args->m_data->read_msg_queue_mtx); if(!m_args->m_data->read_msg_queue.empty()){ std::swap(m_args->m_data->read_msg_queue, job_data->read_queue); - } + printf("DbJobDistributor grabbed %d read query batches\n",job_data->read_queue.size()); } locker.unlock(); @@ -227,6 +233,7 @@ void DatabaseWorkers::Thread(Thread_args* args){ job_data->write_queue.empty() && job_data->read_queue.empty()) return; + printf("DbJobDistributor making db job!\n"); job_data->m_job_name = "database_worker"; m_args->job_queue->AddJob(m_args->the_job); @@ -262,7 +269,8 @@ void DatabaseWorkers::DatabaseJobFail(void*& arg){ //m_args->m_data->query_buffer_pool.Add(m_args->msg_buffer); << FIXME not back to the pool but reply queue //query.result.clear(); // to clear/release bad results... - // ideally we want to pass back an error or what happened to the client? + // ideally we want to pass back an error or what happened to the client (set query.err) + //query.err = ??? but what was the problem? 
DatabaseJobStruct* m_args=static_cast(arg); std::cerr<m_job_name<<" failure"<(arg); + printf("DB worker starting!\n"); // the worker will need a connection to the database thread_local std::unique_ptr conn; @@ -299,73 +308,110 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // alarms insert conn->prepare("alarms_insert", "INSERT INTO alarms ( time, device, level, alarm ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, level int, alarm text)"); // rootplot insert - conn->prepare("rootplots_insert", "INSERT INTO rootplots ( time, name, data, draw_options ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, draw_options text)"); + conn->prepare("rootplots_insert", "INSERT INTO rootplots ( time, name, data, draw_options, lifetime ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, draw_options text, lifetime int) returning version"); // plotlyplot insert - conn->prepare("plotlyplots_insert", "INSERT INTO plotlyplots ( time, name, data, layout ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, layout jsonb)"); + conn->prepare("plotlyplots_insert", "INSERT INTO plotlyplots ( time, name, data, layout, lifetime ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, data jsonb, layout jsonb, lifetime int) returning version"); // calibration insert - conn->prepare("calibration_insert", "INSERT INTO calibration ( time, name, severity, message ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, description text, data jsonb)"); + conn->prepare("calibration_insert", "INSERT INTO calibration ( time, name, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, description text, data jsonb) returning version"); // device config insert - conn->prepare("device_config_insert", "INSERT INTO device_config ( time, device, 
author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, author text, description text, data jsonb)"); + conn->prepare("device_config_insert", "INSERT INTO device_config ( time, device, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, device text, author text, description text, data jsonb) returning version"); // run config insert - conn->prepare("run_config_insert", "INSERT INTO run_config ( time, name, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, author text, description text, data jsonb)"); + conn->prepare("run_config_insert", "INSERT INTO run_config ( time, name, author, description, data ) SELECT * FROM jsonb_to_recordset( $1::jsonb ) as t(time timestamptz, name text, author text, description text, data jsonb) returning config_id"); } } // FIXME if the DB goes down, implement some sort of pausing(?) or local recording to local disk (SQLite?) // we also use a single transaction for all queries, so open that now - pqxx::work tx(*conn.get()); // aka pqxx::transaction<> + pqxx::work* tx = new pqxx::work(*conn.get()); // aka pqxx::transaction<> +// pqxx::substransaction sub(tx); // aka create savepoint and rollback on error. possibly harmful for performance... + + // ok, so the problem with this is any query that fails within a transaction subsequently throws: + // 'current transaction is aborted, commands ignored until end of transaction block' + // for any further use, so we need to handle that. // insert new logging statements - try { - tx.exec(pqxx::prepped{"logging_insert"}, pqxx::params{m_args->logging_queue}); - ++(m_args->monitoring_vars->logging_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->logging_submissions_failed); - // FIXME log the error here - // FIXME if we catch (pqxx::sql_error const &e) or others can we get better information? 
+ printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); + for(std::string& batch : m_args->logging_queue){ + printf("dbworker inserting logging batch: '%s'\n",batch.c_str()); + try { + tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->logging_submissions); + } catch (std::exception& e){ + std::cerr<<"dbworker log insert failed with "<monitoring_vars->logging_submissions_failed); + // FIXME log the error here + // FIXME if we catch (pqxx::sql_error const &e) or others can we get better information? + // after error the transaction becomes unusable, and we must open a new one + delete tx; + tx = new pqxx::work(*conn.get()); + } } // insert new monitoring statements - try { - tx.exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{m_args->monitoring_queue}); - ++(m_args->monitoring_vars->monitoring_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->monitoring_submissions_failed); - // FIXME log the error here + printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); + for(std::string& batch : m_args->monitoring_queue){ + try { + tx->exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->monitoring_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->monitoring_submissions_failed); + std::cerr<<"dbworker mon insert failed with "<rootplot_queue}); - ++(m_args->monitoring_vars->rootplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->rootplot_submissions_failed); - // FIXME log the error here + printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); + for(std::string& batch : m_args->rootplot_queue){ + try { + tx->exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->rootplot_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->rootplot_submissions_failed); + 
std::cerr<<"dbworker rootplot insert failed with "<plotlyplot_queue}); - ++(m_args->monitoring_vars->plotlyplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->plotlyplot_submissions_failed); - // FIXME log the error here + printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); + for(std::string& batch : m_args->plotlyplot_queue){ + try { + tx->exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->plotlyplot_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->plotlyplot_submissions_failed); + std::cerr<<"dbworker plotlyplot insert failed with "<write_queue.size()); for(QueryBatch* batch : m_args->write_queue){ // the batch gets split up by WriteWorkers into a buffer for each type of write query // alarm insertions return nothing, just catch errors - try { - tx.exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); - batch->alarm_batch_success = true; - ++(m_args->monitoring_vars->alarm_submissions); - } catch (std::exception& e){ - batch->alarm_batch_success = false; - ++(m_args->monitoring_vars->alarm_submissions_failed); - // FIXME log the error here + if(batch->got_alarms()){ + printf("calling prepped for alarm buffer '%s'\n",batch->alarm_buffer.c_str()); + try { + tx->exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); + ++(m_args->monitoring_vars->alarm_submissions); + } catch (std::exception& e){ + batch->alarm_batch_err = current_exception_name()+": "+e.what(); + ++(m_args->monitoring_vars->alarm_submissions_failed); + std::cerr<<"dbworker alarm batch '"<alarm_buffer<<"' insert failed with "<devconfig_version_nums.push_back(new_version_num); - }, pqxx::params{batch->devconfig_buffer}); - ++(m_args->monitoring_vars->devconfig_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->devconfig_submissions_failed); - // FIXME log the error here + if(batch->got_devconfigs()){ + 
printf("calling prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"device_config_insert"}, + [&batch](int32_t new_version_num){ + batch->devconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->devconfig_buffer}); + ++(m_args->monitoring_vars->devconfig_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->devconfig_submissions_failed); + batch->devconfig_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker devconfig insert '"<devconfig_buffer<<"' failed with "<runconfig_version_nums.push_back(new_version_num); - }, pqxx::params{batch->runconfig_buffer}); - ++(m_args->monitoring_vars->runconfig_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->runconfig_submissions_failed); - // FIXME log the error here + if(batch->got_runconfigs()){ + printf("calling prepped for run_config buffer '%s'\n",batch->runconfig_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"run_config_insert"}, + [&batch](int32_t new_version_num){ + batch->runconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->runconfig_buffer}); + ++(m_args->monitoring_vars->runconfig_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->runconfig_submissions_failed); + batch->runconfig_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker runconfig insert '"<runconfig_buffer<<"' failed with "<calibration_version_nums.push_back(new_version_num); - }, pqxx::params{batch->calibration_buffer}); - ++(m_args->monitoring_vars->calibration_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->calibration_submissions_failed); - // FIXME log the error here + if(batch->got_calibrations()){ + printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"calibration_insert"}, + [&batch](int32_t new_version_num){ + 
batch->calibration_version_nums.push_back(new_version_num); + }, pqxx::params{batch->calibration_buffer}); + ++(m_args->monitoring_vars->calibration_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->calibration_submissions_failed); + batch->calibration_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker calibration insert '"<calibration_buffer<<"' failed with "<rootplot_version_nums.push_back(new_version_num); - }, pqxx::params{batch->rooplot_buffer}); - ++(m_args->monitoring_vars->rootplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->rootplot_submissions_failed); - // FIXME log the error here + if(batch->got_rootplots()){ + printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"rootplots_insert"}, + [&batch](int32_t new_version_num){ + batch->rootplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->rootplot_buffer}); + ++(m_args->monitoring_vars->rootplot_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->rootplot_submissions_failed); + batch->rootplot_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker rootplot insert '"<rootplot_buffer<<"' failed with "<plotlyplot_version_nums.push_back(new_version_num); - }, pqxx::params{batch->plotlyplot_buffer}); - ++(m_args->monitoring_vars->plotlyplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->plotlyplot_submissions_failed); - // FIXME log the error here + if(batch->got_plotlyplots()){ + printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"plotlyplots_insert"}, + [&batch](int32_t new_version_num){ + batch->plotlyplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->plotlyplot_buffer}); + ++(m_args->monitoring_vars->plotlyplot_submissions); + } catch (std::exception& e){ + 
++(m_args->monitoring_vars->plotlyplot_submissions_failed); + batch->plotlyplot_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker plotlyplot insert '"<plotlyplot_buffer<<"' failed with "<generic_write_query_indices.size()); for(size_t i : batch->generic_write_query_indices){ ZmqQuery& query = batch->queries[i]; try { - query.result = tx.exec(query.msg()); - ++(m_args->monitoring_vars->genericwrite_submissions); + query.result = tx->exec(query.msg()); + ++(m_args->monitoring_vars->generic_submissions); } catch (std::exception& e){ - ++(m_args->monitoring_vars->genericwrite_submissions_failed); + ++(m_args->monitoring_vars->generic_submissions_failed); + query.result.clear(); + query.err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker generic query '"<(&e); + //if(sqle) std::cerr<<"SQLSTATE is now "<sqlstate()<ids.clear(); + m_args->pipeline_error = false; + printf("processing %d read query batches\n",m_args->read_queue.size()); for(QueryBatch* batch : m_args->read_queue){ + + printf("pipelining batch of %d read queries\n",batch->queries.size()); + // it may be best to set the pipeline to retain ~the number of queries we're going to insert, // so that it runs them all in one. TODO or maybe do it in two halves? px.retain(batch->queries.size()); - // insert all the queries + // push all the queries to the DB for(ZmqQuery& query : batch->queries){ - px.insert(query.msg()); // returns a unique query_id (aka long) + m_args->ids.push_back(px.insert(query.msg())); } - // and then get the results - for(ZmqQuery& query : batch->queries){ + // pull the results + for(size_t i=0; iqueries.size(); ++i){ + ZmqQuery& query = batch->queries[i]; try { - query.result.clear(); // should be redundant...but in case of error in ResultWorkers if(px.empty()){ // we should never find the pipeline empty! ... i think? - // not sure if this may happen if we check too soon?? (i.e. no results ready *yet*?) FIXME?? 
- // we call retreive once for each insert, somehow we've got out of sync!! - // FIXME log error, somehow we need to undo this mess. - // maybe it's best we do keep those query_ids after all...? - throw pqxx::failure{"empty pipeline"}; // or something..? + // unless this happens if we check too soon?? (i.e. no results ready *yet*?) FIXME?? + m_args->pipeline_error=true; + break; } else { - query.result = px.retrieve().second; - // technically this returns a pair of {query_id, result} - // TODO for safety we could ensure the id's match... + // FIXME note that retrieving a specific id will block until that result is available + // which means a long-running query will hold up sending replies to faster ones.... + // actually, we'd have to wait on the whole pipeline before returning anyway, + // but this is an issue with batching... + query.result = px.retrieve(m_args->ids[i]); ++(m_args->monitoring_vars->readquery_submissions); - // FIXME technically we should decrement this if we throw anywhere as the whole lot gets rolled back? } } catch (std::exception& e){ ++(m_args->monitoring_vars->readquery_submissions_failed); - // how do we encapsulate this error in the pqxx::result class? - // if the query returns no rows, does result.empty() return the same as if it has no result? - query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check... + query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check...? FIXME + query.err = current_exception_name()+": "+e.what(); // store info about what failed + std::cerr<<"dbworker read query '"<pipeline_error || !px.empty()){ + // either we broke early because the pipeline was empty before we got all results, + // or pipeline is not empty and we're missing some... + std::cerr<<"dbworker pipeline error retrieving all results!"<commit(); } catch(std::exception& e){ // oh yeaaa, the transaction might have commited, or it might not have. awesome. 
// our consolation prize is a `pqxx::in_doubt_error`. // FIXME supposedly, it is up to us to determine whether it committed or not // perhaps by attempting to query whether the inserted records are found... + std::cerr<<"dbworker caught "< locker(m_args->m_data->query_replies_mtx); - m_args->m_data->query_replies.insert(m_args->m_data->query_replies.end(), - m_args->write_queue.begin(),m_args->write_queue.end()); + if(!m_args->write_queue.empty()){ + // FIXME FIXME FIXME rename 'read_replies' to 'raw_replies' or something better + printf("returning %d write acknowledgements to datamodel\n", m_args->write_queue.size()); + std::unique_lock locker(m_args->m_data->read_replies_mtx); + m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), + m_args->write_queue.begin(),m_args->write_queue.end()); + } - locker = std::unique_lock(m_args->m_data->read_replies_mtx); - m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), - m_args->read_queue.begin(),m_args->read_queue.end()); - locker.unlock(); + if(!m_args->read_queue.empty()){ + printf("returning %d read replies to datamodel\n", m_args->read_queue.size()); + std::unique_lock locker(m_args->m_data->read_replies_mtx); + m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), + m_args->read_queue.begin(),m_args->read_queue.end()); + } - std::cerr<m_job_name<<" completed"<m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.h b/UserTools/DatabaseWorkers/DatabaseWorkers.h index 9d9fd14..7c8dbab 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.h +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.h @@ -33,6 +33,9 @@ struct DatabaseJobStruct { std::vector rootplot_queue; std::vector plotlyplot_queue; + std::vector ids; + bool pipeline_error; + void clear(){ read_queue.clear(); write_queue.clear(); diff --git a/UserTools/Monitoring/Monitoring.cpp 
b/UserTools/Monitoring/Monitoring.cpp index c30320e..a0edca9 100644 --- a/UserTools/Monitoring/Monitoring.cpp +++ b/UserTools/Monitoring/Monitoring.cpp @@ -25,7 +25,10 @@ bool Monitoring::Initialise(std::string configfile, DataModel &data){ thread_args.last_send = std::chrono::steady_clock::now(); thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; - m_data->utils.CreateThread("monitoring", &Thread, &thread_args); // thread needs a unique name + if(!m_data->utils.CreateThread("monitoring", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; //m_data->services->AddService("middleman", 5000); // is this needed? what for?? @@ -74,7 +77,7 @@ void Monitoring::Thread(Thread_args* args){ for(std::pair& mon : m_args->m_data->monitoring_variables){ - std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->toJSON()+"}"; + std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJson()+"}"; std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); m_args->m_data->out_mon_msg_queue.push_back(s); diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index 8345ad0..77dd830 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -35,6 +35,8 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data if(type_str=="logging") multicast_address = "239.192.1.2"; else multicast_address = "239.192.1.3"; } + printf("%s binding to %s:%d\n",m_tool_name.c_str(),multicast_address.c_str(),port); + // buffer received messages in a local vector until size exceeds local_buffer_size... m_variables.Get("local_buffer_size",local_buffer_size); // ... 
or time since last transfer exceeds transfer_period_ms @@ -79,12 +81,22 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data Log(m_tool_name+": Failed to set multicast socket to non-blocking with error "+strerror(errno),v_warning); } + // format destination address from IP string struct sockaddr_in addr; - bzero((char *)&addr, sizeof(addr)); // init to 0 + socklen_t addrlen = sizeof(addr); + bzero((char *)&addr, addrlen); // init to 0 addr.sin_family = AF_INET; addr.sin_port = htons(port); + // to receive traffic from a specific group, either bind to that group *and* join the group +// inet_aton(multicast_address.c_str(), &addr.sin_addr); + // or bind to INADDR_ANY, disable IP_MULTICAST_ALL, and then join the group + addr.sin_addr.s_addr = htonl(INADDR_ANY); + a=0; + setsockopt(socket_handle, IPPROTO_IP, IP_MULTICAST_ALL, &a, sizeof(int)); + + /* FIXME FIXME FIXME // sending: which multicast group to send to get_ok = inet_aton(multicast_address.c_str(), &addr.sin_addr); if(get_ok==0){ // returns 0 if invalid, unlike other functions @@ -92,16 +104,16 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data return false; } - // used in sendto / recvfrom methods - socklen_t addrlen = sizeof(addr); - - /* FIXME FIXME FIXME // for two-way comms, we should bind to INADDR_ANY, not a specific multicast address.... maybe? 
struct sockaddr_in multicast_addr2; bzero((char *)&multicast_addr2, sizeof(multicast_addr2)); // init to 0 multicast_addr2.sin_family = AF_INET; multicast_addr2.sin_port = htons(log_port); multicast_addr2.sin_addr.s_addr = htonl(INADDR_ANY); << like this + + // disable receiving multicast messages we send + a=0; + setsockopt(sock.at(i), SOL_SOCKET, IP_MULTICAST_LOOP, &a, sizeof(a)); */ // to listen we need to bind to the socket @@ -143,6 +155,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data thread_args.poll_timeout_ms = poll_timeout_ms; thread_args.local_buffer_size = local_buffer_size; thread_args.in_local_queue = m_data->multicast_buffer_pool.GetNew(local_buffer_size); + thread_args.in_local_queue->resize(0); thread_args.last_transfer = std::chrono::steady_clock::now(); thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; thread_args.in_queue = &m_data->in_multicast_msg_queue; @@ -156,7 +169,11 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data } // thread needs a unique name - m_data->utils.CreateThread(type_str+"_sendreceiver", &Thread, &thread_args); + printf("spawning %s send/receiver thread\n",type_str.c_str()); + if(!m_data->utils.CreateThread(type_str+"_sendreceiver", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -172,6 +189,9 @@ bool MulticastReceiverSender::Execute(){ // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. 
++(monitoring_vars.thread_crashes); } + monitoring_vars.Set("buffered_in_messages",thread_args.in_local_queue->size()); + monitoring_vars.Set("waiting_out_messages",thread_args.out_local_queue.size()); + //monitoring_vars.Set("last_transfer",thread_args.last_transfer); // FIXME FIXME FIXME need cast to string return true; } @@ -180,7 +200,8 @@ bool MulticastReceiverSender::Execute(){ bool MulticastReceiverSender::Finalise(){ // signal background receiver thread to stop - Log(m_tool_name+": Joining receiver thread",v_warning); + //Log(m_tool_name+": Joining receiver thread",v_warning); + printf("joining %s receiver thread\n",m_tool_name.c_str()); m_data->utils.KillThread(&thread_args); m_data->num_threads--; @@ -220,16 +241,16 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // ===================== if(!m_args->in_local_queue->empty() && ((m_args->in_local_queue->size()>m_args->local_buffer_size) || - (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms) ){ + (std::chrono::steady_clock::now() - m_args->last_transfer) > m_args->transfer_period_ms) ){ - std::clog<m_tool_name<<": adding "<in_local_queue->size() - <<" messages to datamodel"<in_local_queue->size(), m_args->m_tool_name.c_str()); std::unique_lock locker(*m_args->in_queue_mtx); m_args->in_queue->push_back(m_args->in_local_queue); locker.unlock(); m_args->in_local_queue = m_data->multicast_buffer_pool.GetNew(m_args->local_buffer_size); + m_args->in_local_queue->resize(0); m_args->last_transfer = std::chrono::steady_clock::now(); ++(m_args->monitoring_vars->in_buffer_transfers); @@ -264,7 +285,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - std::clog<m_tool_name<<": receiving message"<m_tool_name.c_str()); // read the messge FIXME name max num bytes in multicast message m_args->get_ok = recvfrom(m_args->socket, m_args->message, 655355, 0, (struct sockaddr*)&m_args->addr, &m_args->addrlen); @@ 
-290,7 +311,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // ===== if(m_args->out_i < m_args->out_local_queue.size()){ - std::clog<m_tool_name<<": sending message"<m_tool_name.c_str()); // Get the message std::string& message = m_args->out_local_queue[m_args->out_i++]; // always increment, even if error @@ -312,7 +333,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // else see if there are any in datamodel to grab std::unique_lock locker(*m_args->out_queue_mtx); if(!m_args->out_queue->empty()){ - std::clog<m_tool_name<<": receiving fetching new outgoing logging messages"<m_tool_name.c_str()); std::swap(*m_args->out_queue, m_args->out_local_queue); ++(m_args->monitoring_vars->out_buffer_transfers); m_args->out_i=0; @@ -321,5 +342,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + return; } diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp b/UserTools/MulticastWorkers/MulticastWorkers.cpp index ff369e5..43ec3d5 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.cpp +++ b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -28,7 +28,11 @@ bool MulticastWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; - m_data->utils.CreateThread("multicast_job_distributor", &Thread, &thread_args); // thread needs a unique name + // thread needs a unique name + if(!m_data->utils.CreateThread("multicast_job_distributor", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -107,6 +111,7 @@ void MulticastWorkers::Thread(Thread_args* args){ the_job->fail_func = MulticastMessageFail; //multicast_jobs.AddJob(the_job); + printf("spawning new multicastjob for %d messages\n",job_data->msg_buffer->size()); m_args->m_data->job_queue.AddJob(the_job); } @@ -172,6 +177,8 @@ bool 
MulticastWorkers::MulticastMessageJob(void*& arg){ // subsequently, all we need to do here is concatenate the JSONs + printf("MulticastWorker job processing %d batches\n",m_args->msg_buffer->size()); + m_args->logging_buffer = "["; m_args->monitoring_buffer = "["; m_args->rootplot_buffer = "["; @@ -185,8 +192,10 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ // the Services class always puts the topic first, // and all topics start with a unique character (XXX for now?), // so we don't need to parse the message to identify the topic: +// printf("validating first 9 chars are topic: '%s', %d\n",next_msg.substr(0,9).c_str(),strcmp(next_msg.substr(0,9).c_str(),"{\"topic\":")); if(next_msg.substr(0,9)!="{\"topic\":"){ // FIXME log it as bad multicast + printf("Ignoring Bad multicast message '%s'\n",next_msg.c_str()); continue; } @@ -204,11 +213,13 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ m_args->out_buffer = &m_args->plotlyplot_buffer; break; default: + printf("MCworkerJob: unknown multicast topic '%c' in message '%s'\n",next_msg[10],next_msg); continue; // FIXME unknown topic: error log it. 
} if(m_args->out_buffer->length()>1) (*m_args->out_buffer) += ", "; (*m_args->out_buffer) += next_msg; + printf("added message '%s'\n",next_msg.c_str()); ++(m_args->monitoring_vars->msgs_processed); @@ -219,6 +230,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ m_args->logging_buffer += "]"; std::unique_lock locker(m_args->m_data->log_query_queue_mtx); m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); + printf("multicast worker adding '%s' to logging buffer\n",m_args->logging_buffer.c_str()); } if(m_args->monitoring_buffer.length()!=1){ @@ -243,7 +255,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ m_args->msg_buffer->clear(); m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); - std::cerr<m_job_name<<" completed"<m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); m_args->m_pool->Add(m_args); // return our job args to the job args struct pool diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index 3a8179b..e2be98d 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -16,7 +16,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel /* Configuration */ /* ----------------------------------------- */ - port_name = "db_read"; + remote_port_name = "db_read"; // FIXME do these timeouts need to be << transfer_period_ms? 
int rcv_timeout_ms=500; int snd_timeout_ms=500; @@ -26,7 +26,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel int local_buffer_size = 200; int transfer_period_ms = 200; - m_variables.Get("port_name", port_name); + m_variables.Get("remote_port_name", remote_port_name); m_variables.Get("rcv_hwm", rcv_hwm); // max num outstanding messages in receive buffer m_variables.Get("conns_backlog", conns_backlog); // max num oustanding connection requests m_variables.Get("poll_timeout_ms",poll_timeout_ms); @@ -47,7 +47,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel ManagedSocket* managed_socket = new ManagedSocket; managed_socket->service_name=""; // attach to any client type... - managed_socket->port_name = port_name; // ...that advertises a service on port 'port_name' + managed_socket->remote_port_name = remote_port_name; // ...that advertises a service on port 'remote_port_name' managed_socket->socket = new zmq::socket_t(*m_data->context, ZMQ_ROUTER); managed_socket->socket->setsockopt(ZMQ_SNDTIMEO, snd_timeout_ms); managed_socket->socket->setsockopt(ZMQ_RCVTIMEO, rcv_timeout_ms); @@ -70,7 +70,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients std::unique_lock locker(m_data->managed_sockets_mtx); - m_data->managed_sockets[port_name] = managed_socket; + m_data->managed_sockets[remote_port_name] = managed_socket; /* ----------------------------------------- */ /* Thread Setup */ @@ -92,8 +92,13 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel thread_args.make_new = true; thread_args.local_buffer_size = local_buffer_size; thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; + thread_args.last_transfer = std::chrono::steady_clock::now(); - m_data->utils.CreateThread("readrep_sendreceiver", &Thread, 
&thread_args); // thread needs a unique name + // thread needs a unique name + if(!m_data->utils.CreateThread("readrep_sendreceiver", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -130,10 +135,10 @@ bool ReadQueryReceiverReplySender::Finalise(){ } */ - if(m_data->managed_sockets.count(port_name)){ + if(m_data->managed_sockets.count(remote_port_name)){ std::unique_lock locker(m_data->managed_sockets_mtx); - ManagedSocket* sock = m_data->managed_sockets[port_name]; - m_data->managed_sockets.erase(port_name); + ManagedSocket* sock = m_data->managed_sockets[remote_port_name]; + m_data->managed_sockets.erase(remote_port_name); locker.unlock(); if(sock->socket) delete sock->socket; // destructor closes socket delete sock; @@ -155,13 +160,13 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // transfer to datamodel // ===================== if(m_args->in_local_queue->queries.size() >= m_args->local_buffer_size || - (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms){ + (std::chrono::steady_clock::now() - m_args->last_transfer) > m_args->transfer_period_ms){ - if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); if(!m_args->in_local_queue->queries.empty()){ - std::clog<m_tool_name<<": added "<in_local_queue->queries.size() - <<" messages to datamodel"<make_new) m_args->in_local_queue->queries.pop_back(); + + printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); std::unique_lock locker(m_args->m_data->read_msg_queue_mtx); m_args->m_data->read_msg_queue.push_back(m_args->in_local_queue); @@ -169,11 +174,13 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(m_args->local_buffer_size); - m_args->last_transfer = std::chrono::steady_clock::now(); 
m_args->make_new=true; ++(m_args->monitoring_vars->in_buffer_transfers); } + + m_args->last_transfer = std::chrono::steady_clock::now(); + } // poll @@ -211,7 +218,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // read // ==== if(m_args->polls[0].revents & ZMQ_POLLIN){ - std::clog<m_tool_name<<" receiving message"<m_tool_name.c_str()); if(m_args->make_new){ m_args->in_local_queue->queries.emplace_back(); @@ -222,23 +229,18 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // received parts are [client, topic, msg_id, query] // reorder parts on receipt as client and msg_id will be left untouched and re-used for response static constexpr char part_order[4] = {0,2,1,3}; + m_args->msg_parts=0; try { std::unique_lock locker(*m_args->socket_mtx); - for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); - if(!m_args->get_ok || !msg_buf[part_order[m_args->msg_parts]].more()) break; - } + printf("%s receiving part...",m_args->m_tool_name.c_str()); + do { + m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); + printf("%d=%d (more: %d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); + } while(m_args->get_ok && msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); locker.unlock(); - - // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message - if(m_args->get_ok && msg_buf[3].more()){ - while(true){ - m_args->socket->recv(&m_args->msg_discard); - ++m_args->msg_parts; - } - } + printf("\n"); // if the read failed, discard the message if(!m_args->get_ok){ @@ -250,6 +252,11 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ } else if(m_args->msg_parts!=4){ std::cerr<m_tool_name<<": Unexpected "<msg_parts<<" part message"<msg_parts; ++i){ + char msg_str[msg_buf[part_order[i]].size()]; + 
snprintf(&msg_str[0], msg_buf[part_order[i]].size(), "%.*s", (int)msg_buf[part_order[i]].size(), (const char*)msg_buf[part_order[i]].data()); + printf("\tpart %d: %s\n",i, msg_str); + } // FIXME print other info we have (client, message, parts) to help identify culprit // FIXME do we do this? for efficiency? here? do we add a flag for bad and do it in the processing? // FIXME do we try to make a query out of the first 4 parts? i'm gonna say no, for now @@ -260,6 +267,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ m_args->make_new=true; ++(m_args->monitoring_vars->msgs_rcvd); + printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p, %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf, &msg_buf.parts[3]); } @@ -292,11 +300,12 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // check we had a listener ready if(m_args->polls[1].revents & ZMQ_POLLOUT){ - std::clog<m_tool_name<<" sending reply"<m_tool_name.c_str(),m_args->out_i,m_args->out_local_queue->queries.size()); // FIXME better logging ZmqQuery& rep = m_args->out_local_queue->queries[m_args->out_i++]; // FIXME maybe don't pop (increment out_i) until send succeeds? // FIXME maybe implement 'retries' mechanism as previously?
+ printf("reply to message %u has %d parts\n", rep.msg_id(), rep.size()); try { @@ -320,8 +329,11 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ */ return; } + // FIXME if we do implement re-sending, then do not do this + rep.parts.resize(0); // safety to prevent accidentally accessing sent messages, which can segfault // else success + printf("%s reply at %p sent\n",m_args->m_tool_name.c_str(), &rep); ++(m_args->monitoring_vars->msgs_sent); } catch(zmq::error_t& err){ @@ -348,10 +360,11 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->query_replies_mtx); if(!m_args->m_data->query_replies.empty()){ - std::clog<m_tool_name<<": fetching new replies"<m_tool_name.c_str()); // return our batch to the pool if applicable if(m_args->out_local_queue!=nullptr){ + m_args->out_local_queue->queries.clear(); m_args->m_data->querybatch_pool.Add(m_args->out_local_queue); m_args->out_local_queue = nullptr; } diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h index a179a4a..80519d3 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h @@ -56,7 +56,7 @@ class ReadQueryReceiverReplySender: public Tool { ReadQueryReceiverReplySender_args thread_args; ReadReceiveMonitoring monitoring_vars; - std::string port_name; // name by which clients advertise sockets for sending read queries to the DB + std::string remote_port_name; // name by which clients advertise sockets for sending read queries to the DB }; diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index 8225401..29f2811 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -20,7 +20,10 @@ bool ResultWorkers::Initialise(std::string configfile, DataModel 
&data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; - m_data->utils.CreateThread("result_job_distributor", &Thread, &thread_args); + if(!m_data->utils.CreateThread("result_job_distributor", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -63,9 +66,8 @@ void ResultWorkers::Thread(Thread_args* args){ // grab a batch of read queries, with results awaiting conversion std::unique_lock locker(m_args->m_data->read_replies_mtx); - if(!m_args->m_data->read_replies.empty()){ - std::swap(m_args->m_data->read_replies, m_args->local_msg_queue); - } + if(m_args->m_data->read_replies.empty()) return; + std::swap(m_args->m_data->read_replies, m_args->local_msg_queue); locker.unlock(); // add a job for each batch to the queue @@ -95,6 +97,7 @@ void ResultWorkers::Thread(Thread_args* args){ m_args->m_data->job_queue.AddJob(the_job); } + m_args->local_msg_queue.clear(); // TODO add workers that also call setstatus /setversion on batch jobs and then pass them to send thread? // maybe we can generalise to setreply if needed, depending on reply format & batching of read queries @@ -146,9 +149,9 @@ bool ResultWorkers::ResultJob(void*& arg){ // set whether the query succeeded or threw an exception if(query.result.query().empty()){ // FIXME not sure if this is a good check necessarily, esp w/pipelining? 
- query.setsuccess(0); - query.setresponserows(0); + query.setresponserows(1); + query.setresponse(0, query.err); } else { query.setsuccess(1); @@ -217,8 +220,8 @@ bool ResultWorkers::ResultJob(void*& arg){ } // generic query, manual json formation rom fields - // release pqxx::result - query.result.clear(); + // release pqxx::result and clear error + query.Clear(); } // if we had a result object } // loop over queries in this batch @@ -245,48 +248,60 @@ bool ResultWorkers::ResultJob(void*& arg){ switch(query_topic{query.topic()[2]}){ // alarms return just the success status case query_topic::alarm: - query.setsuccess(m_args->batch->alarm_batch_success); + query.setsuccess(m_args->batch->alarm_batch_err.empty()); query.setresponserows(0); break; // everything else returns a version number case query_topic::dev_config: query.setsuccess(devconfigs_ok); + query.setresponserows(1); if(devconfigs_ok){ - query.setresponserows(1); query.setresponse(0, m_args->batch->devconfig_version_nums[devconfig_i++]); + } else { + // FIXME is it worth propagating the error back to the user? + // since it's a batch insert, the error may have nothing to do with their query... 
+ query.setresponse(0, m_args->batch->devconfig_batch_err); } break; case query_topic::run_config: query.setsuccess(runconfigs_ok); + query.setresponserows(1); if(runconfigs_ok){ - query.setresponserows(1); query.setresponse(0, m_args->batch->runconfig_version_nums[runconfig_i++]); + } else { + query.setresponse(0, m_args->batch->runconfig_batch_err); } break; case query_topic::calibration: query.setsuccess(calibrations_ok); + query.setresponserows(1); if(calibrations_ok){ - query.setresponserows(1); query.setresponse(0, m_args->batch->calibration_version_nums[calibration_i++]); + } else { + query.setresponse(0, m_args->batch->calibration_batch_err); } break; case query_topic::plotlyplot: query.setsuccess(plotlyplots_ok); + query.setresponserows(1); if(plotlyplots_ok){ - query.setresponserows(1); query.setresponse(0, m_args->batch->plotlyplot_version_nums[plotlyplot_i++]); + } else { + query.setresponse(0, m_args->batch->plotlyplot_batch_err); } break; case query_topic::rootplot: query.setsuccess(rootplots_ok); + query.setresponserows(1); if(rootplots_ok){ - query.setresponserows(1); query.setresponse(0, m_args->batch->rootplot_version_nums[rootplot_i++]); + } else { + query.setresponse(0, m_args->batch->rootplot_batch_err); } break; @@ -298,6 +313,7 @@ bool ResultWorkers::ResultJob(void*& arg){ // around a user's generic sql, we can combine this with the above. // But, given the arbitrary complexity of statements, this may not be possible. // in which case, we need to loop over rows and convert them to JSON manually + query.setresponserows(std::size(query.result)); for(size_t i=0; imonitoring_vars->result_access_errors); } break; default: // FIXME corrupted topic, log it. 
- //std::cerr<m_job_name<<" unknown topic "<m_data->query_replies.push_back(m_args->batch); locker.unlock(); - std::cerr<m_job_name<<" completed"<m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool diff --git a/UserTools/SocketManager/SocketManager.cpp b/UserTools/SocketManager/SocketManager.cpp index afcf3e6..8cf20b9 100644 --- a/UserTools/SocketManager/SocketManager.cpp +++ b/UserTools/SocketManager/SocketManager.cpp @@ -10,8 +10,10 @@ bool SocketManager::Initialise(std::string configfile, DataModel &data){ InitialiseConfiguration(configfile); //m_variables.Print(); - if(!m_variables.Get("verbose",m_verbose)) m_verbose=1; + m_verbose=1; int update_ms=2000; + + m_variables.Get("verbose",m_verbose); m_variables.Get("update_ms",update_ms); ExportConfiguration(); @@ -28,9 +30,14 @@ bool SocketManager::Initialise(std::string configfile, DataModel &data){ thread_args.update_period_ms = std::chrono::milliseconds{update_ms}; thread_args.last_update = std::chrono::steady_clock::now(); - m_data->utils.CreateThread("socket_manager", &Thread, &thread_args); + if(!m_data->utils.CreateThread("socket_manager", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; + m_data->sc_vars.Add("Clients", SlowControlElementType::INFO, nullptr, nullptr); // INFO type doesnt need read fnct + return true; } @@ -67,7 +74,7 @@ void SocketManager::Thread(Thread_args* args){ SocketManager_args* m_args = dynamic_cast(args); m_args->last_update = std::chrono::steady_clock::now(); - //m_args->m_data->Log("checking for new clients",22); //FIXME + //printf("SocketManager checking for new clients\n"); bool new_clients=false; @@ -77,12 +84,13 @@ void SocketManager::Thread(Thread_args* args){ ManagedSocket* sock = mgd_sock.second; std::unique_lock locker(sock->socket_mtx); - int new_conn_count = (sock->connections.size() - 
m_args->daq_utils->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->port_name)); + int new_conn_count = std::abs((long long int)sock->connections.size() - m_args->daq_utils->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->remote_port_name)); locker.unlock(); if(new_conn_count!=0){ - new_clients = true; //m_args->m_data->services->SendLog(m_tool_name+": "+std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); // FIXME logging + printf("%d new %s connections made!\n",new_conn_count, sock->remote_port_name.c_str()); + new_clients = true; // update the list of clients so they can be queried for(std::pair& aservice : sock->connections){ diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp index d7c9b7b..df7aaae 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp @@ -16,7 +16,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ // am_master = true; // FIXME not sure being used any more m_verbose=1; - port_name = "db_write"; + remote_port_name = "db_write"; // FIXME do these timeouts need to be << transfer_period_ms? int poll_timeout_ms = 500; int rcv_timeout_ms = 500; @@ -26,7 +26,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ int conns_backlog=1000; // FIXME sufficient? 
m_variables.Get("verbose",m_verbose); - m_variables.Get("port_name", port_name); + m_variables.Get("remote_port_name", remote_port_name); m_variables.Get("rcv_hwm", rcv_hwm); // max num outstanding messages in receive buffer m_variables.Get("conns_backlog", conns_backlog); // max num oustanding connection requests m_variables.Get("poll_timeout_ms",poll_timeout_ms); @@ -48,7 +48,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ // ------------------------------------------------------- ManagedSocket* managed_socket = new ManagedSocket; managed_socket->service_name=""; // attach to any client type... - managed_socket->port_name = port_name; // ...that advertises a service on port 'port_name' + managed_socket->remote_port_name = remote_port_name; // ...that advertises a service on port 'remote_port_name' managed_socket->socket = new zmq::socket_t(*m_data->context, ZMQ_SUB); // this socket never sends, so a send timeout is irrelevant. managed_socket->socket->setsockopt(ZMQ_RCVTIMEO, rcv_timeout_ms); @@ -60,7 +60,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ // add the socket to the datamodel for the SocketManager, which will handle making new connections to clients std::unique_lock locker(m_data->managed_sockets_mtx); - m_data->managed_sockets[port_name] = managed_socket; + m_data->managed_sockets[remote_port_name] = managed_socket; /* ----------------------------------------- */ /* Thread Setup */ @@ -80,9 +80,14 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ thread_args.in_local_queue = m_data->querybatch_pool.GetNew(local_buffer_size); thread_args.local_buffer_size = local_buffer_size; thread_args.transfer_period_ms = std::chrono::milliseconds{transfer_period_ms}; + thread_args.last_transfer = std::chrono::steady_clock::now(); thread_args.make_new = true; - m_data->utils.CreateThread("write_query_receiver", &Thread, &thread_args); // thread needs a unique name + 
// thread needs a unique name + if(!m_data->utils.CreateThread("write_query_receiver", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -118,10 +123,10 @@ bool WriteQueryReceiver::Finalise(){ std::cerr<<"WriteReceiver thread terminated"<num_threads--; - if(m_data->managed_sockets.count(port_name)){ + if(m_data->managed_sockets.count(remote_port_name)){ std::unique_lock locker(m_data->managed_sockets_mtx); - ManagedSocket* sock = m_data->managed_sockets[port_name]; - m_data->managed_sockets.erase(port_name); + ManagedSocket* sock = m_data->managed_sockets[remote_port_name]; + m_data->managed_sockets.erase(remote_port_name); locker.unlock(); if(sock->socket) delete sock->socket; // destructor closes socket delete sock; @@ -141,13 +146,13 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // transfer to datamodel // ===================== if(m_args->in_local_queue->queries.size() >= m_args->local_buffer_size || - (m_args->last_transfer - std::chrono::steady_clock::now()) > m_args->transfer_period_ms){ + (std::chrono::steady_clock::now() - m_args->last_transfer) > m_args->transfer_period_ms){ - if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); if(!m_args->in_local_queue->queries.empty()){ - std::clog<m_tool_name<<": adding "<in_local_queue->queries.size() - <<" messages to datamodel"<make_new) m_args->in_local_queue->queries.pop_back(); + + printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); m_args->m_data->write_msg_queue.push_back(m_args->in_local_queue); @@ -155,12 +160,13 @@ void WriteQueryReceiver::Thread(Thread_args* args){ m_args->in_local_queue = m_args->m_data->querybatch_pool.GetNew(m_args->local_buffer_size); - m_args->last_transfer = std::chrono::steady_clock::now(); m_args->make_new=true; 
++(m_args->monitoring_vars->in_buffer_transfers); } + m_args->last_transfer = std::chrono::steady_clock::now(); + } // poll @@ -180,7 +186,7 @@ void WriteQueryReceiver::Thread(Thread_args* args){ } catch(zmq::error_t& err){ // ignore poll aborting due to signals if(zmq_errno()==EINTR) return; - //std::cerr<m_tool_name<<" poll caught "<m_tool_name<<" poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; @@ -199,7 +205,7 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - std::clog<m_tool_name<<": receiving message"<m_tool_name.c_str()); if(m_args->make_new){ m_args->in_local_queue->queries.emplace_back(); // FIXME we could resize(local_buffer_size) on retreive new @@ -210,51 +216,47 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // received parts are [topic, client, msg_id, query] // reorder parts on receipt as client and msg_id will be left untouched and re-used for response static constexpr char part_order[4] = {2,0,1,3}; + m_args->msg_parts=0; try { - // receive expected 4 parts std::unique_lock locker(*m_args->socket_mtx); - for(m_args->msg_parts=0; m_args->msg_parts<4; ++m_args->msg_parts){ - - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[m_args->msg_parts]]); - - if(!m_args->get_ok){ - std::cerr<m_tool_name<<" receive failed with "<monitoring_vars->rcv_fails); -// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
- break; - } - - if(!msg_buf[part_order[m_args->msg_parts]].more()) break; - + printf("%s receiving part...",m_args->m_tool_name.c_str()); + do { + m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); + printf("%d=%d (more: %d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); + } while(m_args->get_ok && msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); + locker.unlock(); + printf("\n"); + + // if receive failed, discard the message + if(!m_args->get_ok){ + std::cerr<m_tool_name<<": receive failed with "<monitoring_vars->rcv_fails); +// m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; } - // if there are more than 4 parts, read the remainder to flush the buffer, but discard the message - if(m_args->get_ok && msg_buf[3].more()){ - while(true){ - m_args->socket->recv(&m_args->msg_discard); - ++m_args->msg_parts; - } + // if there weren't 4 parts, discard the message + if(m_args->msg_parts!=4){ std::cerr<m_tool_name<<": Unexpected "<msg_parts<<" part message"<msg_parts; ++i){ + char msg_str[msg_buf[part_order[i]].size()]; + snprintf(&msg_str[0], msg_buf[part_order[i]].size()+1, "%s", msg_buf[part_order[i]].data()); + printf("\tpart %d: %s\n",i, msg_str); + } ++(m_args->monitoring_vars->bad_msgs); return; } - // if receive failed, discard the message - if(!m_args->get_ok){ - std::cerr<m_tool_name<<": receive failed with "<monitoring_vars->rcv_fails); - return; - } - // else success m_args->make_new=true; ++(m_args->monitoring_vars->msgs_rcvd); + printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p, %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf, &msg_buf.parts[3]); } catch(zmq::error_t& err){ // receive aborted due to signals? 
diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h index 31c176e..4991d25 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h @@ -54,7 +54,7 @@ class WriteQueryReceiver: public Tool { WriteQueryReceiver_args thread_args; WriteReceiveMonitoring monitoring_vars; - std::string port_name; // name by which clients advertise sockets for sending write queries to the DB + std::string remote_port_name; // name by which clients advertise sockets for sending write queries to the DB bool am_master; //bool Promote(); ///< Connect to clients to start receiving messages, if we became master diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index 6fa4d40..d3f15f2 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -20,7 +20,10 @@ bool WriteWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; - m_data->utils.CreateThread("write_job_distributor", &Thread, &thread_args); + if(!m_data->utils.CreateThread("write_job_distributor", &Thread, &thread_args)){ + Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + return false; + } m_data->num_threads++; return true; @@ -93,6 +96,7 @@ void WriteWorkers::Thread(Thread_args* args){ job_data->local_msg_queue = m_args->local_msg_queue[i]; job_data->m_job_name = "write_worker"; + printf("spawning %s job\n", job_data->m_job_name.c_str()); the_job->func = WriteMessageJob; the_job->fail_func = WriteMessageFail; @@ -144,6 +148,8 @@ bool WriteWorkers::WriteMessageJob(void*& arg){ WriteJobStruct* m_args = static_cast(arg); + printf("%s job processing %d queries\n", m_args->m_job_name.c_str(), m_args->local_msg_queue->queries.size()); + m_args->local_msg_queue->reset(); // pull next query from batch @@ -181,7 +187,7 @@ bool 
WriteWorkers::WriteMessageJob(void*& arg){ m_args->out_buffer = &m_args->local_msg_queue->plotlyplot_buffer; break; case query_topic::rootplot: - m_args->out_buffer = &m_args->local_msg_queue->rooplot_buffer; + m_args->out_buffer = &m_args->local_msg_queue->rootplot_buffer; break; case query_topic::generic: // these can't be buffered, just note their indices for the DB workers @@ -189,7 +195,7 @@ bool WriteWorkers::WriteMessageJob(void*& arg){ continue; break; default: - //std::cerr<<"unrecognised topic"<local_msg_queue->close(); + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers std::unique_lock locker(m_args->m_data->write_query_queue_mtx); m_args->m_data->write_query_queue.push_back(m_args->local_msg_queue); locker.unlock(); - std::cerr<m_job_name<<" completed"<m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool diff --git a/configfiles/Dummy/ToolChainConfig b/configfiles/Dummy/ToolChainConfig index 7f0a1a0..533cb97 100644 --- a/configfiles/Dummy/ToolChainConfig +++ b/configfiles/Dummy/ToolChainConfig @@ -18,8 +18,8 @@ log_interactive 1 # Interactive=cout; 0=false, 1= true log_local 1 # Local = local file log; 0=false, 1= true log_local_path ./log # file to store logs to if local is active log_remote 1 # Remote= remote logging system "serservice_name Remote_Logging"; 0=false, 1= true -log_address 239.192.1.1 # Remote multicast address to send logs -log_port 55554 # port on remote machine to connect to +log_address 239.192.1.2 # Remote multicast address to send logs +log_port 5000 # port on remote machine to connect to log_append_time 0 # append seconds since epoch to filename; 0=false, 1= true log_split_files 1 # seperate output and error log files (named x.o and x.e) @@ -46,8 +46,8 @@ clt_dlr_socket_timeout 500 # inpoll_timeout 50 # keep these short! outpoll_timeout 50 # keep these short! 
command_timeout 2000 # -multicast_port 55554 # -multicast_address 239.192.1.1 # +mon_port 5000 # +mon_address 239.192.1.3 # ##### Tools To Add ##### Tools_File configfiles/Dummy/ToolsConfig # list of tools to run and their config files diff --git a/configfiles/middleman/ToolChainConfig b/configfiles/middleman/ToolChainConfig index 8beca02..107c0de 100644 --- a/configfiles/middleman/ToolChainConfig +++ b/configfiles/middleman/ToolChainConfig @@ -8,34 +8,32 @@ attempt_recover 1 # 1= will attempt to finalise if an execute fails, 0= will n remote_port 24002 # port to open for remote commands if running in remote mode IO_Threads 1 # Number of threads for network traffic (~ 1/Gbps) alerts_send 1 # enable ability to send global alerts -alert_send_port 12242 # port to send global alerts +alert_send_port 12252 # port to send global alerts alerts_receive 1 # enable ability to receive global alerts -alert_receive_port 12243 # port to receive global alerts -sc_port 60000 # port for slow control +alert_receive_port 12253 # port to receive global alerts +sc_port 65000 # port for slow control ###### Logging ##### log_interactive 1 # Interactive=cout; 0=false, 1= true log_local 0 # Local = local file log; 0=false, 1= true log_local_path ./log # file to store logs to if local is active log_remote 0 # Remote= remote logging system "serservice_name Remote_Logging"; 0=false, 1= true -log_address 239.192.1.1 # Remote multicast address to send logs -log_port 5001 # port on remote machine to connect to +log_address 239.192.1.2 # Remote multicast address to send logs +log_port 5000 # port on remote machine to connect to log_append_time 0 # append seconds since epoch to filename; 0=false, 1= true log_split_files 0 # seperate output and error log files (named x.o and x.e) ###### Service discovery ##### Ignore these settings for local analysis service_discovery_address 239.192.1.1 # multicast address to use for service discovery service_discovery_port 5000 # port to use for service discovery 
-service_name ToolDAQ_Service # name of Toolchain service to braodcast +service_name middleman # name of Toolchain service to braodcast service_publish_sec 5 # heartbeat send period service_kick_sec 60 # remove hosts with no heartbeat after given period ###### Backend Services ##### NEWLY ADDED NEEDS TO HAVE PROPER DESCRIPTIONS AND SOME PRUNING BEFORE RELEASE -use_backend_services 1 # -db_name daq # +use_backend_services 0 # verbosity 1 # max_retries 3 # -advertise_endpoints 0 # resend_period_ms 1000 # print_stats_period_ms 1000 # clt_pub_port 55556 # @@ -45,8 +43,8 @@ clt_dlr_socket_timeout 500 # inpoll_timeout 50 # keep these short! outpoll_timeout 50 # keep these short! command_timeout 2000 # -multicast_port 55554 # -multicast_address 239.192.1.1 # +mon_port 5000 # +mon_address 239.192.1.3 # ##### Tools To Add ##### Tools_File configfiles/middleman/ToolsConfig # list of tools to run and their config files diff --git a/configfiles/middleman/WriteQueryReceiverConfig b/configfiles/middleman/WriteQueryReceiverConfig index 64a812d..0921e2b 100644 --- a/configfiles/middleman/WriteQueryReceiverConfig +++ b/configfiles/middleman/WriteQueryReceiverConfig @@ -4,6 +4,6 @@ rcv_hwm 10000 conns_backlog 30000 rcv_timeout_ms 10 poll_timeout_ms 10 -transfer_period_ms 200 local_buffer_size 200 +transfer_period_ms 200 #am_master 1 diff --git a/configfiles/template/ToolChainConfig b/configfiles/template/ToolChainConfig index d26b67a..badb1c3 100644 --- a/configfiles/template/ToolChainConfig +++ b/configfiles/template/ToolChainConfig @@ -18,8 +18,8 @@ log_interactive 1 # Interactive=cout; 0=false, 1= true log_local 0 # Local = local file log; 0=false, 1= true log_local_path ./log # file to store logs to if local is active log_remote 0 # Remote= remote logging system "serservice_name Remote_Logging"; 0=false, 1= true -log_address 239.192.1.1 # Remote multicast address to send logs -log_port 5001 # port on remote machine to connect to +log_address 239.192.1.2 # Remote multicast 
address to send logs +log_port 5000 # port on remote machine to connect to log_append_time 0 # append seconds since epoch to filename; 0=false, 1= true log_split_files 0 # seperate output and error log files (named x.o and x.e) @@ -45,8 +45,8 @@ clt_dlr_socket_timeout 500 # inpoll_timeout 50 # keep these short! outpoll_timeout 50 # keep these short! command_timeout 2000 # -multicast_port 55554 # -multicast_address 239.192.1.1 # +mon_port 5000 # +mon_address 239.192.1.3 # ##### Tools To Add ##### Tools_File configfiles/ToolsConfig # list of tools to run and their config files From 645974a21296cc7d2487243a8c89ecfdd97119d7 Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Mon, 5 Jan 2026 12:30:39 +0000 Subject: [PATCH 06/12] add some error handling (query re-submission) in DatabaseWorkers --- DataModel/DataModel.h | 4 +- DataModel/QueryBatch.h | 6 +- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 628 +++++++++++------- UserTools/DatabaseWorkers/DatabaseWorkers.h | 27 + UserTools/ResultWorkers/ResultWorkers.cpp | 6 +- UserTools/WriteWorkers/WriteWorkers.cpp | 2 +- 6 files changed, 417 insertions(+), 256 deletions(-) diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index 68c7d9f..8a20ced 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -127,8 +127,8 @@ class DataModel : public DAQDataModelBase { /* DatabaseWorkers */ /* ----------------------------------------- */ - std::vector read_replies; // output, awaiting for result conversion - std::mutex read_replies_mtx; + std::vector query_results; // output, awaiting for result conversion + std::mutex query_results_mtx; private: diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h index 962f468..ed96926 100644 --- a/DataModel/QueryBatch.h +++ b/DataModel/QueryBatch.h @@ -23,7 +23,7 @@ struct QueryBatch { std::string rootplot_buffer; // flagged for can't be batch inserted by workers - std::vector generic_write_query_indices; + std::vector generic_query_indices; // set by database 
workers after batch insert std::vector devconfig_version_nums; @@ -52,7 +52,7 @@ struct QueryBatch { calibration_version_nums.clear(); plotlyplot_version_nums.clear(); rootplot_version_nums.clear(); - generic_write_query_indices.clear(); + generic_query_indices.clear(); alarm_batch_err.clear(); devconfig_batch_err.clear(); @@ -88,7 +88,7 @@ struct QueryBatch { bool got_calibrations() const { return !calibration_buffer.empty(); } bool got_plotlyplots() const { return !plotlyplot_buffer.empty(); } bool got_rootplots() const { return !rootplot_buffer.empty(); } - bool got_generics() const { return !generic_write_query_indices.empty(); } + bool got_generics() const { return !generic_query_indices.empty(); } }; diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index 363d9fe..4f7a091 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -322,319 +322,453 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // FIXME if the DB goes down, implement some sort of pausing(?) or local recording to local disk (SQLite?) - // we also use a single transaction for all queries, so open that now + // we use a single transaction for all queries, so open that now pqxx::work* tx = new pqxx::work(*conn.get()); // aka pqxx::transaction<> -// pqxx::substransaction sub(tx); // aka create savepoint and rollback on error. possibly harmful for performance... - // ok, so the problem with this is any query that fails within a transaction subsequently throws: - // 'current transaction is aborted, commands ignored until end of transaction block' - // for any further use, so we need to handle that. + // start with the read queries. + // since these don't actually modify the database, if any query or the final 'commit' fails, + // any preceding queries should already have their results, so we don't need to re-do them. 
- // insert new logging statements - printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); - for(std::string& batch : m_args->logging_queue){ - printf("dbworker inserting logging batch: '%s'\n",batch.c_str()); - try { - tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{batch}); - ++(m_args->monitoring_vars->logging_submissions); - } catch (std::exception& e){ - std::cerr<<"dbworker log insert failed with "<monitoring_vars->logging_submissions_failed); - // FIXME log the error here - // FIXME if we catch (pqxx::sql_error const &e) or others can we get better information? - // after error the transaction becomes unusable, and we must open a new one - delete tx; - tx = new pqxx::work(*conn.get()); - } - } + // each batch contains a vector of queries, but unlike inserts, we can't batch these + // as we need the results from each and i'm not sure how we'd tell them apart if we batch submitted. + // for giggles, we'll pipeline them. This may even improve performance. - // insert new monitoring statements - printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); - for(std::string& batch : m_args->monitoring_queue){ - try { - tx->exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{batch}); - ++(m_args->monitoring_vars->monitoring_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->monitoring_submissions_failed); - std::cerr<<"dbworker mon insert failed with "<read_queue.size()); + for(QueryBatch* batch : m_args->read_queue){ + + printf("pipelining batch of %d read queries\n",batch->queries.size()); + + // if a query in the pipeline fails, all subsequent queries will also fail + // so we'll need to go back and re-submit them. + // Keep track of where we got to in case we need to do this. + m_args->last_i=0; + + do { + m_args->ids.clear(); + m_args->pipeline_error=false; + + // XXX set the pipeline to retain 1/2 the queries we're going to insert before pushing to backend? 
+ px->retain((batch->queries.size() - m_args->last_i)/2); + + // push the queries to the DB + for(size_t i=m_args->last_i; iqueries.size(); ++i){ + m_args->ids.push_back(px->insert(batch->queries[i].msg())); + } + + // pull the results + for(size_t i=0; iids.size(); ++i){ + ZmqQuery& query = batch->queries[i+m_args->last_i]; + try { + // XXX retrieving a given id blocks until that result is available + // perhaps we could check is_finished(id) and if not, pull other results while we wait + // not sure if this would be faster, but it would certainly be more complex + query.result = px->retrieve(m_args->ids[i]); + ++(m_args->monitoring_vars->readquery_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->readquery_submissions_failed); + query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check...? FIXME + query.err = current_exception_name()+": "+e.what(); // store info about what failed + std::cerr<<"dbworker read query '"<pipeline_error = true; + m_args->last_i += i+1; + + // pipeline::flush docs say "a backend transaction is aborted automatically when an error occurs" + delete tx; + tx = new pqxx::work(*conn.get()); + + // and we need a new pipeline too + delete px; + px = new pqxx::pipeline(*tx); + + break; + } + } + + } while(m_args->pipeline_error); + + // sanity check + if(!px->empty()){ + // pipeline is somehow still not empty even after we should have retrieved everything...?? + std::cerr<<"dbworker pipeline has surplus results?!"<flush(); // cancel pending queries and discard results... i guess?? } + } + // ok we're done with the pipeline: close it and detach, whatever that means. 
+ px->complete(); - // insert new multicast rootplot statements - printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); - for(std::string& batch : m_args->rootplot_queue){ - try { - tx->exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{batch}); - ++(m_args->monitoring_vars->rootplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->rootplot_submissions_failed); - std::cerr<<"dbworker rootplot insert failed with "<read_queue.empty()){ + printf("returning %d read replies to datamodel\n", m_args->read_queue.size()); + std::unique_lock locker(m_args->m_data->query_results_mtx); + m_args->m_data->query_results.insert(m_args->m_data->query_results.end(), + m_args->read_queue.begin(),m_args->read_queue.end()); } - // insert new multicast plotlyplot statements - printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); - for(std::string& batch : m_args->plotlyplot_queue){ - try { - tx->exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{batch}); - ++(m_args->monitoring_vars->plotlyplot_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->plotlyplot_submissions_failed); - std::cerr<<"dbworker plotlyplot insert failed with "<write_queue.size()); - for(QueryBatch* batch : m_args->write_queue){ - // the batch gets split up by WriteWorkers into a buffer for each type of write query + m_args->last_i=0; + + do { - // alarm insertions return nothing, just catch errors - if(batch->got_alarms()){ - printf("calling prepped for alarm buffer '%s'\n",batch->alarm_buffer.c_str()); - try { - tx->exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); - ++(m_args->monitoring_vars->alarm_submissions); - } catch (std::exception& e){ - batch->alarm_batch_err = current_exception_name()+": "+e.what(); - ++(m_args->monitoring_vars->alarm_submissions_failed); - std::cerr<<"dbworker alarm batch '"<alarm_buffer<<"' insert failed with "<got_devconfigs()){ - printf("calling 
prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); - try { - tx->for_query(pqxx::prepped{"device_config_insert"}, - [&batch](int32_t new_version_num){ - batch->devconfig_version_nums.push_back(new_version_num); - }, pqxx::params{batch->devconfig_buffer}); - ++(m_args->monitoring_vars->devconfig_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->devconfig_submissions_failed); - batch->devconfig_batch_err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker devconfig insert '"<devconfig_buffer<<"' failed with "<last_i = (m_args->endpoint==DatabaseJobStep::generics) ? m_args->endpoint_i : m_args->write_queue.size(); + + for(size_t i=0; ilast_i; ++i){ + QueryBatch* batch = m_args->write_queue[i]; + printf("executing %d generic queries for next batch\n",batch->generic_query_indices.size()); + size_t last_j = (m_args->endpoint==DatabaseJobStep::logging) ? m_args->endpoint_j : batch->generic_query_indices.size(); + for(size_t j=m_args->checkpoint_j; jqueries[batch->generic_query_indices[j]]; + if(!query.err.empty()) continue; // skip queries flagged bad on a previous iteration + try { + query.result = tx->exec(query.msg()); + ++(m_args->monitoring_vars->generic_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->generic_submissions_failed); + query.result.clear(); + query.err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker generic query '"<(&e); + //if(sqle) std::cerr<<"SQLSTATE is now "<sqlstate()<checkpoint_i = i+1; + m_args->checkpoint_j = j; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } } } - // run config insertions - if(batch->got_runconfigs()){ - printf("calling prepped for run_config buffer '%s'\n",batch->runconfig_buffer.c_str()); + if(!m_args->had_error){ + if(m_args->endpoint==DatabaseJobStep::logging) goto commitit; + m_args->checkpoint = DatabaseJobStep::logging; + } + + // insert new logging statements + m_args->last_i = 
(m_args->endpoint==DatabaseJobStep::logging) ? m_args->endpoint_i : m_args->logging_queue.size(); + + printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); + for(size_t i=0; ilast_i; ++i){ + if(m_args->bad_logs.count(i)) continue; + std::string& batch = m_args->logging_queue[i]; + printf("dbworker inserting logging batch: '%s'\n",batch.c_str()); try { - tx->for_query(pqxx::prepped{"run_config_insert"}, - [&batch](int32_t new_version_num){ - batch->runconfig_version_nums.push_back(new_version_num); - }, pqxx::params{batch->runconfig_buffer}); - ++(m_args->monitoring_vars->runconfig_submissions); + tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->logging_submissions); } catch (std::exception& e){ - ++(m_args->monitoring_vars->runconfig_submissions_failed); - batch->runconfig_batch_err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker runconfig insert '"<runconfig_buffer<<"' failed with "<monitoring_vars->logging_submissions_failed); // FIXME log the error here + // FIXME if we catch (pqxx::sql_error const &e) or others can we get better information? + // after error the transaction becomes unusable, and we must open a new one + m_args->bad_logs.emplace(i); + m_args->checkpoint_i = i; + m_args->had_error=true; delete tx; tx = new pqxx::work(*conn.get()); } } + if(!m_args->had_error){ + if(m_args->endpoint==DatabaseJobStep::monitoring) goto commitit; + m_args->checkpoint = DatabaseJobStep::monitoring; + } + + m_args->last_i = (m_args->endpoint==DatabaseJobStep::monitoring) ? 
m_args->endpoint_i : m_args->monitoring_queue.size(); - // calibration data insertions - if(batch->got_calibrations()){ - printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); + // insert new monitoring statements + printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); + for(size_t i=0; ilast_i; ++i){ + if(m_args->bad_mons.count(i)) continue; + std::string& batch = m_args->monitoring_queue[i]; try { - tx->for_query(pqxx::prepped{"calibration_insert"}, - [&batch](int32_t new_version_num){ - batch->calibration_version_nums.push_back(new_version_num); - }, pqxx::params{batch->calibration_buffer}); - ++(m_args->monitoring_vars->calibration_submissions); + tx->exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{batch}); + ++(m_args->monitoring_vars->monitoring_submissions); } catch (std::exception& e){ - ++(m_args->monitoring_vars->calibration_submissions_failed); - batch->calibration_batch_err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker calibration insert '"<calibration_buffer<<"' failed with "<monitoring_vars->monitoring_submissions_failed); + std::cerr<<"dbworker mon insert failed with "<bad_mons.emplace(i); + m_args->checkpoint_i = i; + m_args->had_error=true; delete tx; tx = new pqxx::work(*conn.get()); } } + if(!m_args->had_error){ + if(m_args->endpoint==DatabaseJobStep::rootplots) goto commitit; + m_args->checkpoint = DatabaseJobStep::rootplots; + } + + m_args->last_i = (m_args->endpoint==DatabaseJobStep::rootplots) ? 
m_args->endpoint_i : m_args->rootplot_queue.size(); - // rootplot insertions - if(batch->got_rootplots()){ - printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); + // insert new multicast rootplot statements + printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); + for(size_t i=0; ilast_i; ++i){ + if(m_args->bad_rootplots.count(i)) continue; + std::string& batch = m_args->rootplot_queue[i]; try { - tx->for_query(pqxx::prepped{"rootplots_insert"}, - [&batch](int32_t new_version_num){ - batch->rootplot_version_nums.push_back(new_version_num); - }, pqxx::params{batch->rootplot_buffer}); + tx->exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{batch}); ++(m_args->monitoring_vars->rootplot_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->rootplot_submissions_failed); - batch->rootplot_batch_err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker rootplot insert '"<rootplot_buffer<<"' failed with "<bad_rootplots.emplace(i); + m_args->checkpoint_i = i; + m_args->had_error=true; delete tx; tx = new pqxx::work(*conn.get()); } } + if(!m_args->had_error){ + if(m_args->endpoint==DatabaseJobStep::plotlyplots) goto commitit; + m_args->checkpoint = DatabaseJobStep::plotlyplots; + } - // plotlyplot insertions - if(batch->got_plotlyplots()){ - printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); + m_args->last_i = (m_args->endpoint==DatabaseJobStep::plotlyplots) ? 
m_args->endpoint_i : m_args->plotlyplot_queue.size(); + + // insert new multicast plotlyplot statements + printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); + for(size_t i=0; ilast_i; ++i){ + if(m_args->bad_plotlyplots.count(i)) continue; + std::string& batch = m_args->plotlyplot_queue[i]; try { - tx->for_query(pqxx::prepped{"plotlyplots_insert"}, - [&batch](int32_t new_version_num){ - batch->plotlyplot_version_nums.push_back(new_version_num); - }, pqxx::params{batch->plotlyplot_buffer}); + tx->exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{batch}); ++(m_args->monitoring_vars->plotlyplot_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->plotlyplot_submissions_failed); - batch->plotlyplot_batch_err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker plotlyplot insert '"<plotlyplot_buffer<<"' failed with "<bad_plotlyplots.emplace(i); + m_args->checkpoint_i = i; + m_args->had_error=true; delete tx; tx = new pqxx::work(*conn.get()); } } - - // generic query insertions - // we can't batch these as they're just arbitrary SQL from the user, - // so we need to loop over them. - // FIXME no performance optimisation here: don't expect there to be many... right? 
- // if there's a lot we could use a pipeline as below...but the overhead may not be worth it - printf("serially executing %d generic queries\n",batch->generic_write_query_indices.size()); - for(size_t i : batch->generic_write_query_indices){ - ZmqQuery& query = batch->queries[i]; - try { - query.result = tx->exec(query.msg()); - ++(m_args->monitoring_vars->generic_submissions); - } catch (std::exception& e){ - ++(m_args->monitoring_vars->generic_submissions_failed); - query.result.clear(); - query.err = current_exception_name()+": "+e.what(); - std::cerr<<"dbworker generic query '"<(&e); - //if(sqle) std::cerr<<"SQLSTATE is now "<sqlstate()<had_error){ + if(m_args->endpoint==DatabaseJobStep::writes) goto commitit; + m_args->checkpoint = DatabaseJobStep::writes; } - } - - // read queries - // since these don't actually modify the database, if any query or the final 'commit' fails, - // then preceding queries should already have their results - // (and FIXME check this, maybe later retrieve calls still work too?) - // if so, we can: - // 1. move them to the start of the job, so we can salvage what ran successfully? - // the trouble with that is these queries are "less reliable" since they are not necessarily formed by us. - // perhaps we could separate out `Query` topic jobs to run at the end after the `commit` call? XXX probably this! - // 2. move these to a separate worker job? - - // each batch contains a vector of queries, but unlike inserts, we can't batch these FIXME i think? - // for giggles, we'll pipeline them. This may even improve performance. - pqxx::pipeline px(*tx); - m_args->ids.clear(); - m_args->pipeline_error = false; - printf("processing %d read query batches\n",m_args->read_queue.size()); - for(QueryBatch* batch : m_args->read_queue){ - - printf("pipelining batch of %d read queries\n",batch->queries.size()); - - // it may be best to set the pipeline to retain ~the number of queries we're going to insert, - // so that it runs them all in one. 
TODO or maybe do it in two halves? - px.retain(batch->queries.size()); - // push all the queries to the DB - for(ZmqQuery& query : batch->queries){ - m_args->ids.push_back(px.insert(query.msg())); - } + m_args->last_i = (m_args->endpoint==DatabaseJobStep::writes) ? m_args->endpoint_i : m_args->write_queue.size(); - // pull the results - for(size_t i=0; iqueries.size(); ++i){ - ZmqQuery& query = batch->queries[i]; - try { - if(px.empty()){ - // we should never find the pipeline empty! ... i think? - // unless this happens if we check too soon?? (i.e. no results ready *yet*?) FIXME?? - m_args->pipeline_error=true; - break; - } else { - // FIXME note that retrieving a specific id will block until that result is available - // which means a long-running query will hold up sending replies to faster ones.... - // actually, we'd have to wait on the whole pipeline before returning anyway, - // but this is an issue with batching... - query.result = px.retrieve(m_args->ids[i]); - ++(m_args->monitoring_vars->readquery_submissions); + // write queries + printf("processing %d write batches\n",m_args->write_queue.size()); + for(size_t i=0; ilast_i; ++i){ + QueryBatch* batch = m_args->write_queue[i]; + // the batch gets split up by WriteWorkers into a buffer for each type of write query + + // alarm insertions return nothing, just catch errors + if(batch->got_alarms() && batch->alarm_batch_err.empty()){ + printf("calling prepped for alarm buffer '%s'\n",batch->alarm_buffer.c_str()); + try { + tx->exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); + ++(m_args->monitoring_vars->alarm_submissions); + } catch (std::exception& e){ + batch->alarm_batch_err = current_exception_name()+": "+e.what(); + ++(m_args->monitoring_vars->alarm_submissions_failed); + std::cerr<<"dbworker alarm batch '"<alarm_buffer<<"' insert failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } + } + + // the remaining insertions 
return the new version number + // `pqxx::transaction_base::for_query` runs a query and invokes a callable for each result row + // we use this to collect the returned version numbers into a vector + // N.B. `pqxx::transaction_base::for_stream` is an alternative that is faster for large results + // but slower for small results. TODO check whether ours count as 'large' .. probably not. + + // device config insertions + if(batch->got_devconfigs() && batch->devconfig_batch_err.empty()){ + printf("calling prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"device_config_insert"}, + [&batch](int32_t new_version_num){ + batch->devconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->devconfig_buffer}); + ++(m_args->monitoring_vars->devconfig_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->devconfig_submissions_failed); + batch->devconfig_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker devconfig insert '"<devconfig_buffer<<"' failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } + } + + // run config insertions + if(batch->got_runconfigs() && batch->runconfig_batch_err.empty()){ + printf("calling prepped for run_config buffer '%s'\n",batch->runconfig_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"run_config_insert"}, + [&batch](int32_t new_version_num){ + batch->runconfig_version_nums.push_back(new_version_num); + }, pqxx::params{batch->runconfig_buffer}); + ++(m_args->monitoring_vars->runconfig_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->runconfig_submissions_failed); + batch->runconfig_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker runconfig insert '"<runconfig_buffer<<"' failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } + } + + // calibration data 
insertions + if(batch->got_calibrations() && batch->calibration_batch_err.empty()){ + printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"calibration_insert"}, + [&batch](int32_t new_version_num){ + batch->calibration_version_nums.push_back(new_version_num); + }, pqxx::params{batch->calibration_buffer}); + ++(m_args->monitoring_vars->calibration_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->calibration_submissions_failed); + batch->calibration_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker calibration insert '"<calibration_buffer<<"' failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } + } + + // rootplot insertions + if(batch->got_rootplots() && batch->rootplot_batch_err.empty()){ + printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"rootplots_insert"}, + [&batch](int32_t new_version_num){ + batch->rootplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->rootplot_buffer}); + ++(m_args->monitoring_vars->rootplot_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->rootplot_submissions_failed); + batch->rootplot_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker rootplot insert '"<rootplot_buffer<<"' failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); } - } catch (std::exception& e){ - ++(m_args->monitoring_vars->readquery_submissions_failed); - query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check...? 
FIXME - query.err = current_exception_name()+": "+e.what(); // store info about what failed - std::cerr<<"dbworker read query '"<got_plotlyplots() && batch->plotlyplot_batch_err.empty()){ + printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); + try { + tx->for_query(pqxx::prepped{"plotlyplots_insert"}, + [&batch](int32_t new_version_num){ + batch->plotlyplot_version_nums.push_back(new_version_num); + }, pqxx::params{batch->plotlyplot_buffer}); + ++(m_args->monitoring_vars->plotlyplot_submissions); + } catch (std::exception& e){ + ++(m_args->monitoring_vars->plotlyplot_submissions_failed); + batch->plotlyplot_batch_err = current_exception_name()+": "+e.what(); + std::cerr<<"dbworker plotlyplot insert '"<plotlyplot_buffer<<"' failed with "<checkpoint_i = i+1; + m_args->had_error=true; + delete tx; + tx = new pqxx::work(*conn.get()); + } + } + } - // synchronization check - if(m_args->pipeline_error || !px.empty()){ - // either we broke early because the pipeline was empty before we got all results, - // or pipeline is not empty and we're missing some... - std::cerr<<"dbworker pipeline error retrieving all results!"<commit(); + + m_args->endpoint = m_args->checkpoint; + m_args->endpoint_i = m_args->checkpoint_i; + m_args->endpoint_j = m_args->checkpoint_j; + + } catch(pqxx::in_doubt_error& e){ + // ughhhhhhh.... + // basically this means the transaction may have commited or not, pqxx is not sure. + // it's up to us to figure that out, perhaps by querying for the last inserted record + // FIXME for now, we leave that as a problem for another day... + std::cerr<<"dbworker caught "<had_error = true; + } - } - // we're done with the pipeline: close it and detach, whatever that means. - px.complete(); - - // commit the transaction. Need to do this before it goes out of scope or the whole thing will be rolled back! 
- // FIXME we've already copied out vesion numbers and success statuses as we went along, but if this happens - // those statuses need to be reset!!! FIXME i guess do this in fail func? - // we therefore need to throw for ANY errors to invoke this! - try { - tx->commit(); - } catch(std::exception& e){ - // oh yeaaa, the transaction might have commited, or it might not have. awesome. - // our consolation prize is a `pqxx::in_doubt_error`. - // FIXME supposedly, it is up to us to determine whether it committed or not - // perhaps by attempting to query whether the inserted records are found... - std::cerr<<"dbworker caught "<had_error) break; + + // if something errored, the the pqxx::transaction will have aborted + // and all insertions to the database before that point (the checkpoint) will have been lost. + // so loop back to the start and re-run up to the point of last error (endpoint) + // this time skipping bad queries to hopefully avoid any errors + printf("%s encountered error, re-running up to checkpoint %d\n",m_args->m_job_name, m_args->endpoint); + m_args->had_error=false; + + } while(true); // keep trying until we've submitted everything we can. + // FIXME maybe we should add a limiter to stop one job running forever? + // FIXME we probably need better separation of error types for this + // FIXME at some point we want to also fall back to dumping to local disk if DB is inaccessible + // N.B. that will probably result in duplicates in the on-disk version if we don't record what + // committed succesfully, but that's probably easier to handle when uploading the file to DB + // e.g. 
with 'ON CONFLICT' or somesuch // pass the batch onto the next stage of the pipeline for the DatabaseWorkers if(!m_args->write_queue.empty()){ - // FIXME FIXME FIXME rename 'read_replies' to 'raw_replies' or something better printf("returning %d write acknowledgements to datamodel\n", m_args->write_queue.size()); - std::unique_lock locker(m_args->m_data->read_replies_mtx); - m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), + std::unique_lock locker(m_args->m_data->query_results_mtx); + m_args->m_data->query_results.insert(m_args->m_data->query_results.end(), m_args->write_queue.begin(),m_args->write_queue.end()); } - if(!m_args->read_queue.empty()){ - printf("returning %d read replies to datamodel\n", m_args->read_queue.size()); - std::unique_lock locker(m_args->m_data->read_replies_mtx); - m_args->m_data->read_replies.insert(m_args->m_data->read_replies.end(), - m_args->read_queue.begin(),m_args->read_queue.end()); - } - printf("%s completed\n",m_args->m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.h b/UserTools/DatabaseWorkers/DatabaseWorkers.h index 7c8dbab..8c1e5a6 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.h +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.h @@ -2,6 +2,7 @@ #define DatabaseWorkers_H #include +#include #include "Tool.h" #include "DataModel.h" @@ -18,6 +19,8 @@ * Contact: marcus.o-flaherty@warwick.ac.uk */ +enum class DatabaseJobStep { generics, logging, monitoring, rootplots, plotlyplots, writes, finish }; + struct DatabaseJobStruct { DatabaseJobStruct(Pool* pool, DataModel* data, DatabaseWorkerMonitoring* mon) : m_pool(pool), m_data(data), monitoring_vars(mon){}; @@ -33,9 +36,24 @@ struct DatabaseJobStruct { std::vector rootplot_queue; std::vector plotlyplot_queue; + std::set bad_logs; + std::set bad_mons; + std::set bad_rootplots; + std::set bad_plotlyplots; + + uint16_t last_i; + std::vector ids; bool pipeline_error; + bool had_error; 
+ DatabaseJobStep checkpoint; + DatabaseJobStep endpoint; + size_t checkpoint_i; + size_t checkpoint_j; + size_t endpoint_i; + size_t endpoint_j; + void clear(){ read_queue.clear(); write_queue.clear(); @@ -43,6 +61,15 @@ struct DatabaseJobStruct { monitoring_queue.clear(); rootplot_queue.clear(); plotlyplot_queue.clear(); + + bad_logs.clear(); + bad_mons.clear(); + bad_rootplots.clear(); + bad_plotlyplots.clear(); + + had_error=false; + endpoint=DatabaseJobStep::finish; + } }; diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index 29f2811..b40de80 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -65,9 +65,9 @@ void ResultWorkers::Thread(Thread_args* args){ ResultJobDistributor_args* m_args = reinterpret_cast(args); // grab a batch of read queries, with results awaiting conversion - std::unique_lock locker(m_args->m_data->read_replies_mtx); - if(m_args->m_data->read_replies.empty()) return; - std::swap(m_args->m_data->read_replies, m_args->local_msg_queue); + std::unique_lock locker(m_args->m_data->query_results_mtx); + if(m_args->m_data->query_results.empty()) return; + std::swap(m_args->m_data->query_results, m_args->local_msg_queue); locker.unlock(); // add a job for each batch to the queue diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index d3f15f2..0b45bc0 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -191,7 +191,7 @@ bool WriteWorkers::WriteMessageJob(void*& arg){ break; case query_topic::generic: // these can't be buffered, just note their indices for the DB workers - m_args->local_msg_queue->generic_write_query_indices.push_back(i); + m_args->local_msg_queue->generic_query_indices.push_back(i); continue; break; default: From 8562b687830ab51525500dad0208b7aab3fa515e Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Tue, 6 Jan 2026 16:42:44 +0000 
Subject: [PATCH 07/12] a bit of cleanup --- DataModel/ManagedSocket.h | 2 +- DataModel/MonitoringVariables.h | 13 +- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 4 +- UserTools/Monitoring/Monitoring.cpp | 251 ++++++++---------- .../MulticastReceiverSender.cpp | 12 +- .../ReadQueryReceiverReplySender.cpp | 21 +- UserTools/ResultWorkers/ResultWorkers.cpp | 151 ++++------- .../WriteQueryReceiver/WriteQueryReceiver.cpp | 5 +- UserTools/WriteWorkers/WriteWorkers.cpp | 1 - 9 files changed, 202 insertions(+), 258 deletions(-) diff --git a/DataModel/ManagedSocket.h b/DataModel/ManagedSocket.h index 9c36b29..661210f 100644 --- a/DataModel/ManagedSocket.h +++ b/DataModel/ManagedSocket.h @@ -9,7 +9,7 @@ struct ManagedSocket { std::mutex socket_mtx; zmq::socket_t* socket=nullptr; std::string service_name; - std::string remote_port; +/* std::string remote_port;*/ std::string remote_port_name; std::map connections; }; diff --git a/DataModel/MonitoringVariables.h b/DataModel/MonitoringVariables.h index 4f401b0..efe9317 100644 --- a/DataModel/MonitoringVariables.h +++ b/DataModel/MonitoringVariables.h @@ -7,7 +7,7 @@ class MonitoringVariables { public: MonitoringVariables(){}; virtual ~MonitoringVariables(){}; - virtual std::string toJSON()=0; + virtual std::string toJSON(){ return ""; }; ToolFramework::Store vars; std::mutex mtx; void Clear(){ @@ -23,14 +23,17 @@ class MonitoringVariables { return; } - std::string GetJson(){ + std::string GetJSON(){ std::unique_lock locker(mtx); std::string ret; vars >> ret; - ret.pop_back(); // remove trailing '}' std::string ret2 = toJSON(); - ret2[0]=','; // replace leading '{' with ',' to concatenate the two - return ret+ret2; + if(!ret2.empty()){ + ret.pop_back(); // remove trailing '}' + ret2[0]=','; // replace leading '{' with ',' to concatenate the two + ret += ret2; + } + return ret; } }; diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index 4f7a091..81769c8 100644 --- 
a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -370,7 +370,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ ++(m_args->monitoring_vars->readquery_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->readquery_submissions_failed); - query.result.clear(); // this sets `m_query=nullptr` so maybe we can use that as a check...? FIXME + query.result.clear(); query.err = current_exception_name()+": "+e.what(); // store info about what failed std::cerr<<"dbworker read query '"<last_i; ++i){ QueryBatch* batch = m_args->write_queue[i]; printf("executing %d generic queries for next batch\n",batch->generic_query_indices.size()); - size_t last_j = (m_args->endpoint==DatabaseJobStep::logging) ? m_args->endpoint_j : batch->generic_query_indices.size(); + size_t last_j = (m_args->endpoint==DatabaseJobStep::generics) ? m_args->endpoint_j : batch->generic_query_indices.size(); for(size_t j=m_args->checkpoint_j; jqueries[batch->generic_query_indices[j]]; if(!query.err.empty()) continue; // skip queries flagged bad on a previous iteration diff --git a/UserTools/Monitoring/Monitoring.cpp b/UserTools/Monitoring/Monitoring.cpp index a0edca9..14f135c 100644 --- a/UserTools/Monitoring/Monitoring.cpp +++ b/UserTools/Monitoring/Monitoring.cpp @@ -71,144 +71,129 @@ void Monitoring::Thread(Thread_args* args){ Monitoring_args* m_args = dynamic_cast(args); - if((m_args->last_send - std::chrono::steady_clock::now()) > m_args->monitoring_period_ms){ - - std::unique_lock locker(m_args->m_data->monitoring_variables_mtx); - - for(std::pair& mon : m_args->m_data->monitoring_variables){ - - std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJson()+"}"; - - std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); - m_args->m_data->out_mon_msg_queue.push_back(s); - - } - - /* - // to calculate rates we need to know the difference in number - // of reads/writes 
since last time. So get the last values - unsigned long last_write_query_count; - unsigned long last_read_query_count; - unsigned long last_log_count; - unsigned long last_mon_count; - MonitoringStore.Get("write_queries_recvd", last_write_query_count); - MonitoringStore.Get("read_queries_recvd", last_read_query_count); - MonitoringStore.Get("logs_recvd", last_log_count); - MonitoringStore.Get("mons_recvd", last_mon_count); - - // calculate rates are per minute - elapsed_time = boost::posix_time::microsec_clock::universal_time() - last_stats_calc; - - float read_query_rate = (elapsed_time.total_seconds()==0) ? 0 : - ((read_queries_recvd - last_read_query_count) * 60.) / elapsed_time.total_seconds(); - float write_query_rate = (elapsed_time.total_seconds()==0) ? 0 : - ((write_queries_recvd - last_write_query_count) * 60.) / elapsed_time.total_seconds(); - float log_rate = (elapsed_time.total_seconds()==0) ? 0 : - ((logs_recvd - last_log_count) * 60.) / elapsed_time.total_seconds(); - float mon_rate = (elapsed_time.total_seconds()==0) ? 0 : - ((mons_recvd - last_mon_count) * 60.) / elapsed_time.total_seconds(); - - // dump all stats into a Store. 
- MonitoringStore.Set("min_loop_time",min_loop_ms); - MonitoringStore.Set("max_loop_time",max_loop_ms); - MonitoringStore.Set("loops",loops); - MonitoringStore.Set("loop_rate [Hz]",loops/elapsed_time.total_seconds()); - MonitoringStore.Set("write_queries_waiting",wrt_txn_queue.size()); - MonitoringStore.Set("read_queries_waiting",rd_txn_queue.size()); - MonitoringStore.Set("replies_waiting",resp_queue.size()); - MonitoringStore.Set("incoming_logs_waiting",in_log_queue.size()); - MonitoringStore.Set("incoming_mons_waiting",in_mon_queue.size()); - MonitoringStore.Set("out_multicasts_waiting",out_multicast_queue.size()); - MonitoringStore.Set("cached_queries",cache.size()); - MonitoringStore.Set("write_queries_recvd", write_queries_recvd); - MonitoringStore.Set("write_query_recv_fails", write_query_recv_fails); - MonitoringStore.Set("read_queries_recvd", read_queries_recvd); - MonitoringStore.Set("read_query_recv_fails", read_query_recv_fails); - MonitoringStore.Set("logs_recvd", logs_recvd.load()); - MonitoringStore.Set("mons_recvd", mons_recvd.load()); - MonitoringStore.Set("log_recv_fails", log_recv_fails.load()); - MonitoringStore.Set("mon_recv_fails", mon_recv_fails.load()); - MonitoringStore.Set("mm_broadcasts_recvd", mm_broadcasts_recvd); - MonitoringStore.Set("mm_broadcast_recv_fails", mm_broadcast_recv_fails); - MonitoringStore.Set("write_queries_failed", write_queries_failed); - MonitoringStore.Set("log_queries_failed", log_queries_failed.load()); - MonitoringStore.Set("mon_queries_failed", mon_queries_failed.load()); - MonitoringStore.Set("read_queries_failed", read_queries_failed); - MonitoringStore.Set("reps_sent", reps_sent); - MonitoringStore.Set("rep_send_fails", rep_send_fails); - MonitoringStore.Set("multicasts_sent", multicasts_sent); - MonitoringStore.Set("multicast_send_fails", multicast_send_fails); - MonitoringStore.Set("mm_broadcasts_sent", mm_broadcasts_sent); - MonitoringStore.Set("mm_broadcasts_failed", mm_broadcasts_failed); - 
MonitoringStore.Set("master_clashes", master_clashes); - MonitoringStore.Set("master_clashes_failed", master_clashes_failed); - MonitoringStore.Set("standby_clashes", standby_clashes); - MonitoringStore.Set("standby_clashes_failed", standby_clashes_failed); - MonitoringStore.Set("self_promotions", self_promotions); - MonitoringStore.Set("self_promotions_failed", self_promotions_failed); - MonitoringStore.Set("promotions", promotions); - MonitoringStore.Set("promotions_failed", promotions_failed); - MonitoringStore.Set("demotions", demotions); - MonitoringStore.Set("demotions_failed", demotions_failed); - MonitoringStore.Set("dropped_writes", dropped_writes); - MonitoringStore.Set("dropped_reads", dropped_reads); - MonitoringStore.Set("dropped_resps", dropped_resps); - MonitoringStore.Set("dropped_log_in", dropped_log_in); - MonitoringStore.Set("dropped_mon_in", dropped_mon_in); - MonitoringStore.Set("dropped_logs_out", dropped_logs_out); - MonitoringStore.Set("dropped_monitoring_out", dropped_monitoring_out); - MonitoringStore.Set("read_query_rate", read_query_rate); - MonitoringStore.Set("write_query_rate", write_query_rate); - - // convert Store into a json - std::string json_stats; - MonitoringStore >> json_stats; - - // update the web page status - // actually, this only supports a single word, with no spaces? - std::stringstream status; - status << " read qrys (rcvd/rcv errs/qry errs):["<SetValue(status.str()); - -// // temporarily bypass the database logging level to ensure it gets sent to the monitoring db. 
-// int db_verbosity_tmp = db_verbosity; -// db_verbosity = 10; -// Log(Concat("Monitoring Stats:",json_stats),15); -// db_verbosity = db_verbosity_tmp; - - //std::string sql_qry = "INSERT INTO monitoring ( time, device, subject, data ) VALUES ( 'now()', '" - // + my_id+"','stats','"+json_stats+"' );"; - - std::string multicast_msg = "{ \"topic\":\"monitoring\"" - ", \"subject\":\"stats\"" - ", \"device\":\""+escape_json(my_id)+"\"" - + ", \"time\":"+std::to_string(time(nullptr)*1000) // ms since unix epoch - + ", \"data\":\""+json_stats+"\" }"; - - if(am_master){ - in_mon_queue_mtx.lock(); - in_mon_queue.push_back(multicast_msg); - in_mon_queue_mtx.unlock(); - } else { - out_multicast_queue.push_back(multicast_msg); // FIXME FIXME FIXME needs to go to mon port - } + m_args->last_send = std::chrono::steady_clock::now(); + //printf("Monitoring sending stats\n"); + + std::unique_lock locker(m_args->m_data->monitoring_variables_mtx); + + for(std::pair& mon : m_args->m_data->monitoring_variables){ - // reset counters - last_send = std::chrono::steady_clocknow(); + std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJSON()+"}"; - min_loop_ms=9999999; - max_loop_ms=0; - loops=0; + // FIXME or just put into received queue for insertion to DB? + std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); + m_args->m_data->out_mon_msg_queue.push_back(s); - */ } + /* + // FIXME calculate rates and stuff, expand monitoring in Tools + // to calculate rates we need to know the difference in number + // of reads/writes since last time. 
So get the last values + unsigned long last_write_query_count; + unsigned long last_read_query_count; + unsigned long last_log_count; + unsigned long last_mon_count; + MonitoringStore.Get("write_queries_recvd", last_write_query_count); + MonitoringStore.Get("read_queries_recvd", last_read_query_count); + MonitoringStore.Get("logs_recvd", last_log_count); + MonitoringStore.Get("mons_recvd", last_mon_count); + + // calculate message rates + elapsed_time = boost::posix_time::microsec_clock::universal_time() - last_stats_calc; + + float read_query_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((read_queries_recvd - last_read_query_count) * 60.) / elapsed_time.total_seconds(); + float write_query_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((write_queries_recvd - last_write_query_count) * 60.) / elapsed_time.total_seconds(); + float log_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((logs_recvd - last_log_count) * 60.) / elapsed_time.total_seconds(); + float mon_rate = (elapsed_time.total_seconds()==0) ? 0 : + ((mons_recvd - last_mon_count) * 60.) / elapsed_time.total_seconds(); + + // dump all stats into a Store. 
+ MonitoringStore.Set("min_loop_time",min_loop_ms); + MonitoringStore.Set("max_loop_time",max_loop_ms); + MonitoringStore.Set("loops",loops); + MonitoringStore.Set("loop_rate [Hz]",loops/elapsed_time.total_seconds()); + MonitoringStore.Set("write_queries_waiting",wrt_txn_queue.size()); + MonitoringStore.Set("read_queries_waiting",rd_txn_queue.size()); + MonitoringStore.Set("replies_waiting",resp_queue.size()); + MonitoringStore.Set("incoming_logs_waiting",in_log_queue.size()); + MonitoringStore.Set("incoming_mons_waiting",in_mon_queue.size()); + MonitoringStore.Set("out_multicasts_waiting",out_multicast_queue.size()); + MonitoringStore.Set("cached_queries",cache.size()); + MonitoringStore.Set("mm_broadcasts_recvd", mm_broadcasts_recvd); + MonitoringStore.Set("mm_broadcast_recv_fails", mm_broadcast_recv_fails); + MonitoringStore.Set("mm_broadcasts_sent", mm_broadcasts_sent); + MonitoringStore.Set("mm_broadcasts_failed", mm_broadcasts_failed); + MonitoringStore.Set("master_clashes", master_clashes); + MonitoringStore.Set("master_clashes_failed", master_clashes_failed); + MonitoringStore.Set("standby_clashes", standby_clashes); + MonitoringStore.Set("standby_clashes_failed", standby_clashes_failed); + MonitoringStore.Set("self_promotions", self_promotions); + MonitoringStore.Set("self_promotions_failed", self_promotions_failed); + MonitoringStore.Set("promotions", promotions); + MonitoringStore.Set("promotions_failed", promotions_failed); + MonitoringStore.Set("demotions", demotions); + MonitoringStore.Set("demotions_failed", demotions_failed); + MonitoringStore.Set("dropped_writes", dropped_writes); + MonitoringStore.Set("dropped_reads", dropped_reads); + MonitoringStore.Set("dropped_resps", dropped_resps); + MonitoringStore.Set("dropped_log_in", dropped_log_in); + MonitoringStore.Set("dropped_mon_in", dropped_mon_in); + MonitoringStore.Set("dropped_logs_out", dropped_logs_out); + MonitoringStore.Set("dropped_monitoring_out", dropped_monitoring_out); + 
MonitoringStore.Set("read_query_rate", read_query_rate); + MonitoringStore.Set("write_query_rate", write_query_rate); + + // convert Store into a json + std::string json_stats; + MonitoringStore >> json_stats; + + // update the web page status + // actually, this only supports a single word, with no spaces? + std::stringstream status; + status << " read qrys (rcvd/rcv errs/qry errs):["<SetValue(status.str()); + +// // temporarily bypass the database logging level to ensure it gets sent to the monitoring db. +// int db_verbosity_tmp = db_verbosity; +// db_verbosity = 10; +// Log(Concat("Monitoring Stats:",json_stats),15); +// db_verbosity = db_verbosity_tmp; + + //std::string sql_qry = "INSERT INTO monitoring ( time, device, subject, data ) VALUES ( 'now()', '" + // + my_id+"','stats','"+json_stats+"' );"; + + std::string multicast_msg = "{ \"topic\":\"monitoring\"" + ", \"subject\":\"stats\"" + ", \"device\":\""+escape_json(my_id)+"\"" + + ", \"time\":"+std::to_string(time(nullptr)*1000) // ms since unix epoch + + ", \"data\":\""+json_stats+"\" }"; + + if(am_master){ + in_mon_queue_mtx.lock(); + in_mon_queue.push_back(multicast_msg); + in_mon_queue_mtx.unlock(); + } else { + out_multicast_queue.push_back(multicast_msg); + } + + min_loop_ms=9999999; + max_loop_ms=0; + loops=0; + + */ + + std::this_thread::sleep_until(m_args->last_send+m_args->monitoring_period_ms); + return; } diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index 77dd830..bab3caf 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -2,6 +2,10 @@ #include +namespace { + const uint32_t MAX_UDP_PACKET_SIZE = 655355; +} + MulticastReceiverSender::MulticastReceiverSender():Tool(){} @@ -287,8 +291,8 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ if(m_args->poll.revents & ZMQ_POLLIN){ printf("%s receiving 
message\n",m_args->m_tool_name.c_str()); - // read the messge FIXME name max num bytes in multicast message - m_args->get_ok = recvfrom(m_args->socket, m_args->message, 655355, 0, (struct sockaddr*)&m_args->addr, &m_args->addrlen); + // read the messge + m_args->get_ok = recvfrom(m_args->socket, m_args->message, MAX_UDP_PACKET_SIZE, 0, (struct sockaddr*)&m_args->addr, &m_args->addrlen); if(m_args->get_ok <= 0){ ++(m_args->monitoring_vars->rcv_fails); // FIXME better logging @@ -300,7 +304,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ ++(m_args->monitoring_vars->msgs_rcvd); //m_data->Log(m_tool_name+": Received multicast message '"+std::string(m_args->message) - // +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); // FIXME streamline + // +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); m_args->in_local_queue->emplace_back(m_args->message); @@ -342,7 +346,5 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ } - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - return; } diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index e2be98d..8248d6b 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -2,7 +2,6 @@ ReadQueryReceiverReplySender::ReadQueryReceiverReplySender():Tool(){} -//FIXME call it readqueryreceviverandreplysender bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel &data){ InitialiseTool(data); @@ -83,8 +82,8 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel thread_args.m_data = m_data; thread_args.m_tool_name = m_tool_name; thread_args.monitoring_vars = &monitoring_vars; - thread_args.socket = managed_socket->socket; // FIXME get from struct. 
- thread_args.socket_mtx = &managed_socket->socket_mtx; // FIXME get from struct. For sharing socket with SocketManager + thread_args.socket = managed_socket->socket; + thread_args.socket_mtx = &managed_socket->socket_mtx; thread_args.poll_timeout_ms = poll_timeout_ms; thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLIN,0); thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLOUT,0); @@ -113,6 +112,7 @@ bool ReadQueryReceiverReplySender::Execute(){ Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); } + // FIXME add monitoring info: queue sizes return true; } @@ -126,17 +126,8 @@ bool ReadQueryReceiverReplySender::Finalise(){ std::cerr<<"ReadReceiver thread terminated"<num_threads--; - // FIXME ensure we don't interfere with SocketManager? Better to leave that to do deletion in its destructor? - /* - if(managed_socket->socket){ - std::unique_lock lock(managed_socket->socket_mtx); - delete managed_socket->socket; - managed_socket->socket=nullptr; - } - */ - + std::unique_lock locker(m_args->m_data->managed_sockets_mtx); if(m_data->managed_sockets.count(remote_port_name)){ - std::unique_lock locker(m_data->managed_sockets_mtx); ManagedSocket* sock = m_data->managed_sockets[remote_port_name]; m_data->managed_sockets.erase(remote_port_name); locker.unlock(); @@ -144,7 +135,7 @@ bool ReadQueryReceiverReplySender::Finalise(){ delete sock; } - std::unique_lock locker(m_data->monitoring_variables_mtx); + locker = std::unique_lock(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); Log(m_tool_name+": Finished",v_warning); @@ -292,7 +283,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // write // ===== //m_args->m_data->Log("Size of reply queue is "+ - // (m_args->out_local_queue ? std::to_string(m_args->out_local_queue.size()) : std::string{"0"}),10); // FIXME + // (m_args->out_local_queue ? 
std::to_string(m_args->out_local_queue.size()) : std::string{"0"}),10); // send next response message, if we have one in the queue if(m_args->out_local_queue!=nullptr && m_args->out_iout_local_queue->queries.size()){ diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index b40de80..f0e45dc 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -148,7 +148,7 @@ bool ResultWorkers::ResultJob(void*& arg){ for(ZmqQuery& query : m_args->batch->queries){ // set whether the query succeeded or threw an exception - if(query.result.query().empty()){ // FIXME not sure if this is a good check necessarily, esp w/pipelining? + if(!query.err.empty()){ query.setsuccess(0); query.setresponserows(1); query.setresponse(0, query.err); @@ -159,66 +159,23 @@ bool ResultWorkers::ResultJob(void*& arg){ // returned rows are sent back formatted as JSON, with each row a new zmq::message_t // resize zmq vector in preparation query.setresponserows(std::size(query.result)); + // should always be 0 or 1 rows FIXME we could add a check for that here - if(query_topic{query.topic()[2]}!=query_topic::generic){ - - // just for good measure, when we try to access the pqxx result, - // enclose within try just in case it throws something - try { - // standard queries generated by the libDAQInterface use `row_to_json` - // to request results already packaged up into one JSON per row - // so all we need to do is copy that into the zmq message - for(size_t i=0; imonitoring_vars->result_access_errors); - } - - } else { - - // just for good measure, when we try to access the pqxx result, - // enclose within try just in case it throws something - try { - // TODO if we can safely shoehorn in a wrapping call to `row_to_json` - // around a user's generic sql, we can combine this with the above. - // But, given the arbitrary complexity of statements, this may not be possible. 
- // in which case, we need to loop over rows and convert them to JSON manually - for(size_t i=0; itmpval = "{"; - for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; - m_args->tmpval += "\"" + std::string{it->name()} + "\":"; - // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' - // but to convert this into JSON, strings need to be quoted: - // i.e. { "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } - // this means we need to add enclosing quotes *only* for string fields - if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ - m_args->tmpval += "\""+std::string{it->c_str()}+"\""; - } else { - m_args->tmpval += it->c_str(); - } - } - m_args->tmpval += "}"; - - query.setresponse(i, m_args->tmpval); - } - - } catch (std::exception& e){ - std::cerr<<"caught "<monitoring_vars->result_access_errors); + // just for good measure, when we try to access the pqxx result, + // enclose within try just in case it throws something + try { + // standard queries generated by the libDAQInterface use `row_to_json` + // to request results already packaged up into one JSON per row + // so all we need to do is copy that into the zmq message + for(size_t i=0; imonitoring_vars->result_access_errors); + } // release pqxx::result and clear error query.Clear(); @@ -259,8 +216,6 @@ bool ResultWorkers::ResultJob(void*& arg){ if(devconfigs_ok){ query.setresponse(0, m_args->batch->devconfig_version_nums[devconfig_i++]); } else { - // FIXME is it worth propagating the error back to the user? - // since it's a batch insert, the error may have nothing to do with their query... 
query.setresponse(0, m_args->batch->devconfig_batch_err); } break; @@ -306,43 +261,53 @@ bool ResultWorkers::ResultJob(void*& arg){ break; case query_topic::generic: - // just for good measure, when we try to access the pqxx result, - // enclose within try just in case it throws something - try { - // TODO if we can safely shoehorn in a wrapping call to `row_to_json` - // around a user's generic sql, we can combine this with the above. - // But, given the arbitrary complexity of statements, this may not be possible. - // in which case, we need to loop over rows and convert them to JSON manually - query.setresponserows(std::size(query.result)); - for(size_t i=0; itmpval = "{"; - for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; - m_args->tmpval += "\"" + std::string{it->name()} + "\":"; - // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' - // but to convert this into JSON, strings need to be quoted: - // i.e. { "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } - // this means we need to add enclosing quotes *only* for string fields - if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ - m_args->tmpval += "\""+std::string{it->c_str()}+"\""; - } else { - m_args->tmpval += it->c_str(); - } - } - m_args->tmpval += "}"; - - query.setresponse(i, m_args->tmpval); - } - } catch (std::exception& e){ - std::cerr<<"caught "<monitoring_vars->result_access_errors); + + } else { + query.setsuccess(1); + + try { + // TODO if we can safely shoehorn in a wrapping call to `row_to_json` + // around a user's generic sql, we can combine this with the above. + // But, given the arbitrary complexity of statements, this may not be possible. 
+ // in which case, we need to loop over rows and convert them to JSON manually + query.setresponserows(std::size(query.result)); + for(size_t i=0; itmpval = "{"; + for (pqxx::row::iterator it=query.result[i].begin(); ittmpval += ", "; + m_args->tmpval += "\"" + std::string{it->name()} + "\":"; + // Field values are returned bare: i.e. '3' or 'cat' or '{"iam":"ajson"}' + // but to convert this into JSON, strings need to be quoted: + // i.e. { "field1":3, "field2":"cat", "field3":{"iam":"ajson"} } + // this means we need to add enclosing quotes *only* for string fields + if((it->type()==18) || (it->type()==25) || (it->type()==1042) || (it->type()==1043)){ + m_args->tmpval += "\""+std::string{it->c_str()}+"\""; + } else { + m_args->tmpval += it->c_str(); + } + } + m_args->tmpval += "}"; + + query.setresponse(i, m_args->tmpval); + } + + } catch (std::exception& e){ + std::cerr<<"caught "<monitoring_vars->result_access_errors); + } } + break; default: diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp index df7aaae..d87d524 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp @@ -93,7 +93,6 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ return true; } -// FIXME renoame to writequeryreceiver bool WriteQueryReceiver::Execute(){ if(!thread_args.running){ @@ -208,8 +207,8 @@ void WriteQueryReceiver::Thread(Thread_args* args){ printf("%s receiving message\n",m_args->m_tool_name.c_str()); if(m_args->make_new){ - m_args->in_local_queue->queries.emplace_back(); // FIXME we could resize(local_buffer_size) on retreive new - m_args->make_new = false; // then resize down to actual size on transfer out + m_args->in_local_queue->queries.emplace_back(); + m_args->make_new = false; } ZmqQuery& msg_buf = m_args->in_local_queue->queries.back(); msg_buf.parts.resize(4); diff --git 
a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index 0b45bc0..096dbb7 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -168,7 +168,6 @@ bool WriteWorkers::WriteMessageJob(void*& arg){ // as t(time timestamptz, name text, data jsonb) returning version;" // as before, such batches need to be grouped according to destination table switch(query_topic{query.topic()[2]}){ - // FIXME switch letter to query_type enum class case query_topic::alarm: // alarm insertions require no return value, // but we still need to send back an acknowledgement once the alarm is inserted From 28af414ce48e57afcb8708d3b14bcc751b1d6386 Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Fri, 9 Jan 2026 16:27:31 +0000 Subject: [PATCH 08/12] move multicast batch strings to a Pool for re-use --- DataModel/DataModel.h | 10 +++-- DataModel/QueryBatch.h | 10 ++--- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 32 ++++++++------- UserTools/DatabaseWorkers/DatabaseWorkers.h | 8 ++-- .../MulticastWorkers/MulticastWorkers.cpp | 40 ++++++++++--------- UserTools/MulticastWorkers/MulticastWorkers.h | 8 ++-- .../ReadQueryReceiverReplySender.cpp | 2 +- 7 files changed, 60 insertions(+), 50 deletions(-) diff --git a/DataModel/DataModel.h b/DataModel/DataModel.h index 8a20ced..017f478 100644 --- a/DataModel/DataModel.h +++ b/DataModel/DataModel.h @@ -105,16 +105,18 @@ class DataModel : public DAQDataModelBase { // FIXME these strings represent batches of multicast messages, so could be very large. // each push_back could require reallocation, which could involve moving a lot of very large message buffers // FIXME make these pointers, put the strings (maybe make a struct? maybe just a typedef/alias?) in a pool? 
- std::vector log_query_queue; + Pool multicast_batch_pool{true, 5000, 100}; + + std::vector log_query_queue; std::mutex log_query_queue_mtx; - std::vector mon_query_queue; + std::vector mon_query_queue; std::mutex mon_query_queue_mtx; - std::vector rootplot_query_queue; + std::vector rootplot_query_queue; std::mutex rootplot_query_queue_mtx; - std::vector plotlyplot_query_queue; + std::vector plotlyplot_query_queue; std::mutex plotlyplot_query_queue_mtx; /* ----------------------------------------- */ diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h index ed96926..94c4678 100644 --- a/DataModel/QueryBatch.h +++ b/DataModel/QueryBatch.h @@ -26,11 +26,11 @@ struct QueryBatch { std::vector generic_query_indices; // set by database workers after batch insert - std::vector devconfig_version_nums; - std::vector runconfig_version_nums; - std::vector calibration_version_nums; - std::vector plotlyplot_version_nums; - std::vector rootplot_version_nums; + std::vector devconfig_version_nums; + std::vector runconfig_version_nums; + std::vector calibration_version_nums; + std::vector plotlyplot_version_nums; + std::vector rootplot_version_nums; std::string alarm_batch_err; std::string devconfig_batch_err; diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index 81769c8..eb612c0 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -478,10 +478,10 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_logs.count(i)) continue; - std::string& batch = m_args->logging_queue[i]; - printf("dbworker inserting logging batch: '%s'\n",batch.c_str()); + std::string* batch = m_args->logging_queue[i]; + printf("dbworker inserting logging batch: '%s'\n",batch->c_str()); try { - tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{batch}); + 
tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{*batch}); ++(m_args->monitoring_vars->logging_submissions); } catch (std::exception& e){ std::cerr<<"dbworker log insert failed with "<m_data->multicast_batch_pool.Add(batch); } if(!m_args->had_error){ if(m_args->endpoint==DatabaseJobStep::monitoring) goto commitit; @@ -507,9 +508,9 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_mons.count(i)) continue; - std::string& batch = m_args->monitoring_queue[i]; + std::string* batch = m_args->monitoring_queue[i]; try { - tx->exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{batch}); + tx->exec(pqxx::prepped{"monitoring_insert"}, pqxx::params{*batch}); ++(m_args->monitoring_vars->monitoring_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->monitoring_submissions_failed); @@ -521,6 +522,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ delete tx; tx = new pqxx::work(*conn.get()); } + m_args->m_data->multicast_batch_pool.Add(batch); } if(!m_args->had_error){ if(m_args->endpoint==DatabaseJobStep::rootplots) goto commitit; @@ -533,9 +535,9 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_rootplots.count(i)) continue; - std::string& batch = m_args->rootplot_queue[i]; + std::string* batch = m_args->rootplot_queue[i]; try { - tx->exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{batch}); + tx->exec(pqxx::prepped{"rootplots_insert"}, pqxx::params{*batch}); ++(m_args->monitoring_vars->rootplot_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->rootplot_submissions_failed); @@ -547,6 +549,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ delete tx; tx = new pqxx::work(*conn.get()); } + m_args->m_data->multicast_batch_pool.Add(batch); } if(!m_args->had_error){ 
if(m_args->endpoint==DatabaseJobStep::plotlyplots) goto commitit; @@ -559,9 +562,9 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_plotlyplots.count(i)) continue; - std::string& batch = m_args->plotlyplot_queue[i]; + std::string* batch = m_args->plotlyplot_queue[i]; try { - tx->exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{batch}); + tx->exec(pqxx::prepped{"plotlyplots_insert"}, pqxx::params{*batch}); ++(m_args->monitoring_vars->plotlyplot_submissions); } catch (std::exception& e){ ++(m_args->monitoring_vars->plotlyplot_submissions_failed); @@ -573,6 +576,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ delete tx; tx = new pqxx::work(*conn.get()); } + m_args->m_data->multicast_batch_pool.Add(batch); } if(!m_args->had_error){ if(m_args->endpoint==DatabaseJobStep::writes) goto commitit; @@ -616,7 +620,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); try { tx->for_query(pqxx::prepped{"device_config_insert"}, - [&batch](int32_t new_version_num){ + [&batch](uint16_t new_version_num){ batch->devconfig_version_nums.push_back(new_version_num); }, pqxx::params{batch->devconfig_buffer}); ++(m_args->monitoring_vars->devconfig_submissions); @@ -637,7 +641,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for run_config buffer '%s'\n",batch->runconfig_buffer.c_str()); try { tx->for_query(pqxx::prepped{"run_config_insert"}, - [&batch](int32_t new_version_num){ + [&batch](uint16_t new_version_num){ batch->runconfig_version_nums.push_back(new_version_num); }, pqxx::params{batch->runconfig_buffer}); ++(m_args->monitoring_vars->runconfig_submissions); @@ -658,7 +662,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); try { 
tx->for_query(pqxx::prepped{"calibration_insert"}, - [&batch](int32_t new_version_num){ + [&batch](uint16_t new_version_num){ batch->calibration_version_nums.push_back(new_version_num); }, pqxx::params{batch->calibration_buffer}); ++(m_args->monitoring_vars->calibration_submissions); @@ -679,7 +683,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); try { tx->for_query(pqxx::prepped{"rootplots_insert"}, - [&batch](int32_t new_version_num){ + [&batch](uint16_t new_version_num){ batch->rootplot_version_nums.push_back(new_version_num); }, pqxx::params{batch->rootplot_buffer}); ++(m_args->monitoring_vars->rootplot_submissions); @@ -700,7 +704,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); try { tx->for_query(pqxx::prepped{"plotlyplots_insert"}, - [&batch](int32_t new_version_num){ + [&batch](uint16_t new_version_num){ batch->plotlyplot_version_nums.push_back(new_version_num); }, pqxx::params{batch->plotlyplot_buffer}); ++(m_args->monitoring_vars->plotlyplot_submissions); diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.h b/UserTools/DatabaseWorkers/DatabaseWorkers.h index 8c1e5a6..043c602 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.h +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.h @@ -31,10 +31,10 @@ struct DatabaseJobStruct { std::vector read_queue; std::vector write_queue; - std::vector logging_queue; - std::vector monitoring_queue; - std::vector rootplot_queue; - std::vector plotlyplot_queue; + std::vector logging_queue; + std::vector monitoring_queue; + std::vector rootplot_queue; + std::vector plotlyplot_queue; std::set bad_logs; std::set bad_mons; diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp b/UserTools/MulticastWorkers/MulticastWorkers.cpp index 43ec3d5..58d363e 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.cpp +++ 
b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -103,9 +103,13 @@ void MulticastWorkers::Thread(Thread_args* args){ // alternatively do we just over-write the job pointer with new args (potentially leaking it) } MulticastJobStruct* job_data = static_cast(the_job->data); - job_data->msg_buffer = m_args->local_msg_queue[i]; job_data->monitoring_vars = m_args->monitoring_vars; job_data->m_job_name = "multicast_worker"; + job_data->msg_buffer = m_args->local_msg_queue[i]; + job_data->logging_buffer = m_args->m_data->multicast_batch_pool.GetNew(); + job_data->monitoring_buffer = m_args->m_data->multicast_batch_pool.GetNew(); + job_data->rootplot_buffer = m_args->m_data->multicast_batch_pool.GetNew(); + job_data->plotlyplot_buffer = m_args->m_data->multicast_batch_pool.GetNew(); the_job->func = MulticastMessageJob; the_job->fail_func = MulticastMessageFail; @@ -179,10 +183,10 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ printf("MulticastWorker job processing %d batches\n",m_args->msg_buffer->size()); - m_args->logging_buffer = "["; - m_args->monitoring_buffer = "["; - m_args->rootplot_buffer = "["; - m_args->plotlyplot_buffer = "["; + *m_args->logging_buffer = "["; + *m_args->monitoring_buffer = "["; + *m_args->rootplot_buffer = "["; + *m_args->plotlyplot_buffer = "["; // loop over messages for(std::string& next_msg : *m_args->msg_buffer){ @@ -201,16 +205,16 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ switch(query_topic{next_msg[10]}){ case query_topic::logging: - m_args->out_buffer = &m_args->logging_buffer; + m_args->out_buffer = m_args->logging_buffer; break; case query_topic::monitoring: - m_args->out_buffer = &m_args->monitoring_buffer; + m_args->out_buffer = m_args->monitoring_buffer; break; case query_topic::rootplot: - m_args->out_buffer = &m_args->rootplot_buffer; + m_args->out_buffer = m_args->rootplot_buffer; break; case query_topic::plotlyplot: - m_args->out_buffer = &m_args->plotlyplot_buffer; + m_args->out_buffer = 
m_args->plotlyplot_buffer; break; default: printf("MCworkerJob: unknown multicast topic '%c' in message '%s'\n",next_msg[10],next_msg); @@ -226,27 +230,27 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ } // pass into datamodel for DatabaseWorkers - if(m_args->logging_buffer.length()!=1){ - m_args->logging_buffer += "]"; + if(m_args->logging_buffer->length()!=1){ + *m_args->logging_buffer += "]"; std::unique_lock locker(m_args->m_data->log_query_queue_mtx); m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); - printf("multicast worker adding '%s' to logging buffer\n",m_args->logging_buffer.c_str()); + printf("multicast worker adding '%s' to logging buffer\n",m_args->logging_buffer->c_str()); } - if(m_args->monitoring_buffer.length()!=1){ - m_args->monitoring_buffer += "]"; + if(m_args->monitoring_buffer->length()!=1){ + *m_args->monitoring_buffer += "]"; std::unique_lock locker(m_args->m_data->mon_query_queue_mtx); m_args->m_data->mon_query_queue.push_back(m_args->monitoring_buffer); } - if(m_args->rootplot_buffer.length()!=1){ - m_args->rootplot_buffer += "]"; + if(m_args->rootplot_buffer->length()!=1){ + *m_args->rootplot_buffer += "]"; std::unique_lock locker(m_args->m_data->rootplot_query_queue_mtx); m_args->m_data->rootplot_query_queue.push_back(m_args->rootplot_buffer); } - if(m_args->plotlyplot_buffer.length()!=1){ - m_args->plotlyplot_buffer += "]"; + if(m_args->plotlyplot_buffer->length()!=1){ + *m_args->plotlyplot_buffer += "]"; std::unique_lock locker(m_args->m_data->plotlyplot_query_queue_mtx); m_args->m_data->plotlyplot_query_queue.push_back(m_args->plotlyplot_buffer); } diff --git a/UserTools/MulticastWorkers/MulticastWorkers.h b/UserTools/MulticastWorkers/MulticastWorkers.h index fa3a738..4207f43 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.h +++ b/UserTools/MulticastWorkers/MulticastWorkers.h @@ -26,11 +26,11 @@ struct MulticastJobStruct { Pool* m_pool; std::string m_job_name; std::vector* msg_buffer; + 
std::string* logging_buffer; + std::string* monitoring_buffer; + std::string* rootplot_buffer; + std::string* plotlyplot_buffer; std::string* out_buffer; - std::string logging_buffer; - std::string monitoring_buffer; - std::string rootplot_buffer; - std::string plotlyplot_buffer; }; diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index 8248d6b..b22efc4 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -126,7 +126,7 @@ bool ReadQueryReceiverReplySender::Finalise(){ std::cerr<<"ReadReceiver thread terminated"<num_threads--; - std::unique_lock locker(m_args->m_data->managed_sockets_mtx); + std::unique_lock locker(m_data->managed_sockets_mtx); if(m_data->managed_sockets.count(remote_port_name)){ ManagedSocket* sock = m_data->managed_sockets[remote_port_name]; m_data->managed_sockets.erase(remote_port_name); From 316d6eeba5b7550b974bfe3c743da5619848ec58 Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Fri, 9 Jan 2026 20:08:51 +0000 Subject: [PATCH 09/12] split in/out poll as it always returns immediately if available listener, leading to no rate limiting. add Sleep Tool to rate limit the main execution thread. These prevent the application pegging cores when nothing is being done. 
--- UserTools/Factory/Factory.cpp | 1 + .../ReadQueryReceiverReplySender.cpp | 50 +++++++++++++++---- .../ReadQueryReceiverReplySender.h | 3 +- UserTools/Sleep/README.md | 1 + UserTools/Sleep/Sleep.cpp | 37 ++++++++++++++ UserTools/Sleep/Sleep.h | 34 +++++++++++++ UserTools/Unity.h | 1 + configfiles/middleman/SleepConfig | 2 + configfiles/middleman/ToolsConfig | 1 + 9 files changed, 120 insertions(+), 10 deletions(-) create mode 100644 UserTools/Sleep/README.md create mode 100644 UserTools/Sleep/Sleep.cpp create mode 100644 UserTools/Sleep/Sleep.h create mode 100644 configfiles/middleman/SleepConfig diff --git a/UserTools/Factory/Factory.cpp b/UserTools/Factory/Factory.cpp index 45fdef8..a015d63 100644 --- a/UserTools/Factory/Factory.cpp +++ b/UserTools/Factory/Factory.cpp @@ -17,5 +17,6 @@ if (tool=="ResultWorkers") ret=new ResultWorkers; if (tool=="JobManager") ret=new JobManager; //if (tool=="QueueTrimmer") ret=new QueueTrimmer; //if (tool=="MiddlemanNegotiate") ret=new MiddlemanNegotiate; + if (tool=="Sleep") ret=new Sleep; return ret; } diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index b22efc4..efcb0e2 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -85,8 +85,8 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel thread_args.socket = managed_socket->socket; thread_args.socket_mtx = &managed_socket->socket_mtx; thread_args.poll_timeout_ms = poll_timeout_ms; - thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLIN,0); - thread_args.polls.emplace_back(*managed_socket->socket,0,ZMQ_POLLOUT,0); + thread_args.in_poll = zmq::pollitem_t{*managed_socket->socket,0,ZMQ_POLLIN,0}; + thread_args.out_poll = zmq::pollitem_t{*managed_socket->socket,0,ZMQ_POLLOUT,0}; thread_args.in_local_queue = 
m_data->querybatch_pool.GetNew(local_buffer_size); thread_args.make_new = true; thread_args.local_buffer_size = local_buffer_size; @@ -179,28 +179,28 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ try { m_args->get_ok=0; std::unique_lock locker(*m_args->socket_mtx); - m_args->get_ok = zmq::poll(m_args->polls.data(), 2, m_args->poll_timeout_ms); + m_args->get_ok = zmq::poll(&m_args->in_poll, 1, m_args->poll_timeout_ms); } catch(zmq::error_t& err){ // ignore poll aborting due to signals if(zmq_errno()==EINTR) return; // this is probably fine - //std::cerr<m_tool_name<<" poll caught "<m_tool_name<<" in poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; } catch(std::exception& err){ - std::cerr<m_tool_name<<" poll caught "<m_tool_name<<" in poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; } catch(...){ - std::cerr<m_tool_name<<" poll caught "<m_tool_name<<" in poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? return; } if(m_args->get_ok<0){ - std::cerr<m_tool_name<<" poll failed with "<m_tool_name<<" in poll failed with "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
return; @@ -208,7 +208,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // read // ==== - if(m_args->polls[0].revents & ZMQ_POLLIN){ + if(m_args->in_poll.revents & ZMQ_POLLIN){ printf("%s receiving message\n",m_args->m_tool_name.c_str()); if(m_args->make_new){ @@ -288,8 +288,40 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // send next response message, if we have one in the queue if(m_args->out_local_queue!=nullptr && m_args->out_iout_local_queue->queries.size()){ + // poll + // ==== + try { + m_args->get_ok=0; + std::unique_lock locker(*m_args->socket_mtx); + m_args->get_ok = zmq::poll(&m_args->out_poll, 1, m_args->poll_timeout_ms); + } catch(zmq::error_t& err){ + // ignore poll aborting due to signals + if(zmq_errno()==EINTR) return; // this is probably fine + std::cerr<m_tool_name<<" out poll caught "<monitoring_vars->polls_failed); + // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; + } + catch(std::exception& err){ + std::cerr<m_tool_name<<" out poll caught "<monitoring_vars->polls_failed); + // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; + } catch(...){ + std::cerr<m_tool_name<<" out poll caught "<monitoring_vars->polls_failed); + // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + return; + } + if(m_args->get_ok<0){ + std::cerr<m_tool_name<<" out poll failed with "<monitoring_vars->polls_failed); + // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? 
+ return; + } + // check we had a listener ready - if(m_args->polls[1].revents & ZMQ_POLLOUT){ + if(m_args->out_poll.revents & ZMQ_POLLOUT){ printf("%s sending reply %d/%d\n",m_args->m_tool_name.c_str(),m_args->out_i,m_args->out_local_queue->queries.size()); // FIXME better logging diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h index 80519d3..72998d8 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h @@ -26,7 +26,8 @@ struct ReadQueryReceiverReplySender_args : public Thread_args { std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients int poll_timeout_ms; - std::vector polls; + zmq::pollitem_t in_poll; + zmq::pollitem_t out_poll; zmq::message_t msg_discard; bool make_new; int msg_parts; diff --git a/UserTools/Sleep/README.md b/UserTools/Sleep/README.md new file mode 100644 index 0000000..28e8dfa --- /dev/null +++ b/UserTools/Sleep/README.md @@ -0,0 +1 @@ +# DAQFramework diff --git a/UserTools/Sleep/Sleep.cpp b/UserTools/Sleep/Sleep.cpp new file mode 100644 index 0000000..4420f5a --- /dev/null +++ b/UserTools/Sleep/Sleep.cpp @@ -0,0 +1,37 @@ +#include "Sleep.h" + +Sleep::Sleep():Tool(){} + + +bool Sleep::Initialise(std::string configfile, DataModel &data){ + + InitialiseTool(data); + m_configfile = configfile; + InitialiseConfiguration(configfile); + //m_variables.Print(); + + ExportConfiguration(); + + unsigned int period_ms = 10; + m_variables.Get("period_ms",period_ms); + toolchain_period_ms = std::chrono::milliseconds{period_ms}; + + last_execute = std::chrono::steady_clock::now(); + + return true; +} + + +bool Sleep::Execute(){ + + std::this_thread::sleep_until(last_execute+toolchain_period_ms); + last_execute = std::chrono::steady_clock::now(); + + return true; +} + + +bool Sleep::Finalise(){ + + return 
true; +} diff --git a/UserTools/Sleep/Sleep.h b/UserTools/Sleep/Sleep.h new file mode 100644 index 0000000..d2667a3 --- /dev/null +++ b/UserTools/Sleep/Sleep.h @@ -0,0 +1,34 @@ +#ifndef Sleep_H +#define Sleep_H + +#include +#include + +#include "Tool.h" +#include "DataModel.h" + +/** +* \class Sleep +* +* This Tool simply sleeps to throttle the rate of the main ToolChain Execute loop, to prevent the main thread pegging a CPU core. It may be useful for highly threaded toolchains which do minimal work in Execute functions. +* +* $Author: Marcus O'Flaherty $ +* $Date: 2026/09/01 $ +*/ + +class Sleep: public Tool { + + public: + Sleep(); ///< Simple constructor + bool Initialise(std::string configfile,DataModel &data); ///< Initialise Function for setting up Tool resorces. @param configfile The path and name of the dynamic configuration file to read in. @param data A reference to the transient data class used to pass information between Tools. + bool Execute(); ///< Execute function used to perform Tool purpose + bool Finalise(); ///< Finalise function used to clean up resources. 
+ + private: + std::chrono::time_point last_execute; + std::chrono::milliseconds toolchain_period_ms; + +}; + + +#endif diff --git a/UserTools/Unity.h b/UserTools/Unity.h index d46c484..2de0217 100644 --- a/UserTools/Unity.h +++ b/UserTools/Unity.h @@ -13,3 +13,4 @@ #include "QueueTrimmer.h" #include "MiddlemanNegotiate.h" */ +#include "Sleep.h" diff --git a/configfiles/middleman/SleepConfig b/configfiles/middleman/SleepConfig new file mode 100644 index 0000000..1ab4c09 --- /dev/null +++ b/configfiles/middleman/SleepConfig @@ -0,0 +1,2 @@ +verbose 1 +period_ms 10 diff --git a/configfiles/middleman/ToolsConfig b/configfiles/middleman/ToolsConfig index 5afb0ac..8072297 100644 --- a/configfiles/middleman/ToolsConfig +++ b/configfiles/middleman/ToolsConfig @@ -9,4 +9,5 @@ resultWorkers ResultWorkers configfiles/middleman/ResultWorkersConfig jobManager JobManager configfiles/middleman/JobManagerConfig socketManager SocketManager configfiles/middleman/SocketManagerConfig monitoring Monitoring configfiles/middleman/MonitoringConfig +sleep Sleep configfiles/middleman/SleepConfig From 9d8ed4f686d8c01ee257a22341c41fc29648353e Mon Sep 17 00:00:00 2001 From: Marcus O'Flaherty Date: Fri, 9 Jan 2026 20:49:27 +0000 Subject: [PATCH 10/12] bugfix to monitoring, add sleeps in workers when nothing to do to prevent cpu pegging --- DataModel/MonitoringVariables.h | 1 + UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 5 ++++- UserTools/Monitoring/Monitoring.cpp | 2 +- .../MulticastReceiverSender.cpp | 4 ++-- UserTools/MulticastWorkers/MulticastWorkers.cpp | 13 ++++++++----- UserTools/ResultWorkers/ResultWorkers.cpp | 9 ++++----- UserTools/WriteWorkers/WriteWorkers.cpp | 3 +++ 7 files changed, 23 insertions(+), 14 deletions(-) diff --git a/DataModel/MonitoringVariables.h b/DataModel/MonitoringVariables.h index efe9317..6c35ef0 100644 --- a/DataModel/MonitoringVariables.h +++ b/DataModel/MonitoringVariables.h @@ -28,6 +28,7 @@ class MonitoringVariables { std::string ret; vars >> ret; 
std::string ret2 = toJSON(); + if(ret.length()==2) return ret2; // if nothing in Store, return result from toJSON if(!ret2.empty()){ ret.pop_back(); // remove trailing '}' ret2[0]=','; // replace leading '{' with ',' to concatenate the two diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index eb612c0..9bb3f1c 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -231,7 +231,10 @@ void DatabaseWorkers::Thread(Thread_args* args){ job_data->rootplot_queue.empty() && job_data->plotlyplot_queue.empty() && job_data->write_queue.empty() && - job_data->read_queue.empty()) return; + job_data->read_queue.empty()){ + usleep(100); + return; + } printf("DbJobDistributor making db job!\n"); job_data->m_job_name = "database_worker"; diff --git a/UserTools/Monitoring/Monitoring.cpp b/UserTools/Monitoring/Monitoring.cpp index 14f135c..db469af 100644 --- a/UserTools/Monitoring/Monitoring.cpp +++ b/UserTools/Monitoring/Monitoring.cpp @@ -78,7 +78,7 @@ void Monitoring::Thread(Thread_args* args){ for(std::pair& mon : m_args->m_data->monitoring_variables){ - std::string s="{\"time\":0, \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJSON()+"}"; + std::string s="{\"topic\":\"Monitoring\", \"time\":\"now()\", \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJSON()+"}"; // FIXME or just put into received queue for insertion to DB? 
std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index bab3caf..8df8a6c 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -315,7 +315,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // ===== if(m_args->out_i < m_args->out_local_queue.size()){ - printf("sending %s message\n",m_args->m_tool_name.c_str()); + //printf("%s sending message\n",m_args->m_tool_name.c_str()); // Get the message std::string& message = m_args->out_local_queue[m_args->out_i++]; // always increment, even if error @@ -337,7 +337,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // else see if there are any in datamodel to grab std::unique_lock locker(*m_args->out_queue_mtx); if(!m_args->out_queue->empty()){ - printf("fetching new outgoing %s messages\n",m_args->m_tool_name.c_str()); + //printf("%s fetching new outgoing messages\n",m_args->m_tool_name.c_str()); std::swap(*m_args->out_queue, m_args->out_local_queue); ++(m_args->monitoring_vars->out_buffer_transfers); m_args->out_i=0; diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp b/UserTools/MulticastWorkers/MulticastWorkers.cpp index 58d363e..9aa2e41 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.cpp +++ b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -79,6 +79,9 @@ void MulticastWorkers::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->in_multicast_msg_queue_mtx); if(!m_args->m_data->in_multicast_msg_queue.empty()){ std::swap(m_args->m_data->in_multicast_msg_queue, m_args->local_msg_queue); + } else { + usleep(100); + return; } locker.unlock(); @@ -181,7 +184,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ // subsequently, all we need to do here is concatenate the JSONs - printf("MulticastWorker job processing %d 
batches\n",m_args->msg_buffer->size()); + printf("%s processing %d batches\n",m_args->m_job_name.c_str(), m_args->msg_buffer->size()); *m_args->logging_buffer = "["; *m_args->monitoring_buffer = "["; @@ -199,7 +202,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ // printf("validating first 9 chars are topic: '%s', %d\n",next_msg.substr(0,9).c_str(),strcmp(next_msg.substr(0,9).c_str(),"{\"topic\":")); if(next_msg.substr(0,9)!="{\"topic\":"){ // FIXME log it as bad multicast - printf("Ignoring Bad multicast message '%s'\n",next_msg.c_str()); + printf("%s ignoring bad multicast message '%s'\n",m_args->m_job_name.c_str(), next_msg.c_str()); continue; } @@ -217,13 +220,13 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ m_args->out_buffer = m_args->plotlyplot_buffer; break; default: - printf("MCworkerJob: unknown multicast topic '%c' in message '%s'\n",next_msg[10],next_msg); + printf("%s unknown multicast topic '%c' in message '%s'\n",m_args->m_job_name.c_str(), next_msg[10],next_msg.c_str()); continue; // FIXME unknown topic: error log it. 
} if(m_args->out_buffer->length()>1) (*m_args->out_buffer) += ", "; (*m_args->out_buffer) += next_msg; - printf("added message '%s'\n",next_msg.c_str()); + printf("%s added message '%s'\n",m_args->m_job_name.c_str(), next_msg.c_str()); ++(m_args->monitoring_vars->msgs_processed); @@ -234,7 +237,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ *m_args->logging_buffer += "]"; std::unique_lock locker(m_args->m_data->log_query_queue_mtx); m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); - printf("multicast worker adding '%s' to logging buffer\n",m_args->logging_buffer->c_str()); + printf("%s adding '%s' to logging buffer\n",m_args->m_job_name.c_str(), m_args->logging_buffer->c_str()); } if(m_args->monitoring_buffer->length()!=1){ diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index f0e45dc..a2ee797 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -66,7 +66,10 @@ void ResultWorkers::Thread(Thread_args* args){ // grab a batch of read queries, with results awaiting conversion std::unique_lock locker(m_args->m_data->query_results_mtx); - if(m_args->m_data->query_results.empty()) return; + if(m_args->m_data->query_results.empty()){ + usleep(100); + return; + } std::swap(m_args->m_data->query_results, m_args->local_msg_queue); locker.unlock(); @@ -99,10 +102,6 @@ void ResultWorkers::Thread(Thread_args* args){ } m_args->local_msg_queue.clear(); - // TODO add workers that also call setstatus /setversion on batch jobs and then pass them to send thread? - // maybe we can generalise to setreply if needed, depending on reply format & batching of read queries - // or do we just do this in the connection / reply sender thread(s)? 
- return; } diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index 096dbb7..a532eae 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -69,6 +69,9 @@ void WriteWorkers::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); if(!m_args->m_data->write_msg_queue.empty()){ std::swap(m_args->m_data->write_msg_queue, m_args->local_msg_queue); + } else { + usleep(100); + return; } locker.unlock(); From 21d5edb9534c6a09d2eb652ccfff27d1f85ae95c Mon Sep 17 00:00:00 2001 From: marcus Date: Sun, 1 Feb 2026 02:36:50 +0000 Subject: [PATCH 11/12] improve/fix mutex contention and clean up printouts --- DataModel/ManagedSocket.h | 1 + DataModel/QueryBatch.h | 4 + DataModel/ZmqQuery.h | 24 ++++-- UserTools/DatabaseWorkers/DatabaseWorkers.cpp | 69 +++++++++------- UserTools/Monitoring/Monitoring.cpp | 26 ++++-- UserTools/Monitoring/Monitoring.h | 4 + .../MulticastReceiverSender.cpp | 58 ++++++++----- .../MulticastWorkers/MulticastWorkers.cpp | 19 +++-- .../ReadQueryReceiverReplySender.cpp | 82 ++++++++++++------- .../ReadQueryReceiverReplySender.h | 3 +- UserTools/ResultWorkers/ResultWorkers.cpp | 16 ++-- UserTools/SocketManager/SocketManager.cpp | 28 +++++-- UserTools/SocketManager/SocketManager.h | 5 ++ .../WriteQueryReceiver/WriteQueryReceiver.cpp | 43 ++++++---- .../WriteQueryReceiver/WriteQueryReceiver.h | 3 +- UserTools/WriteWorkers/WriteWorkers.cpp | 20 +++-- 16 files changed, 264 insertions(+), 141 deletions(-) diff --git a/DataModel/ManagedSocket.h b/DataModel/ManagedSocket.h index 661210f..807ddb5 100644 --- a/DataModel/ManagedSocket.h +++ b/DataModel/ManagedSocket.h @@ -7,6 +7,7 @@ struct ManagedSocket { std::mutex socket_mtx; + bool socket_manager_request=false; zmq::socket_t* socket=nullptr; std::string service_name; /* std::string remote_port;*/ diff --git a/DataModel/QueryBatch.h b/DataModel/QueryBatch.h index 94c4678..4156223 100644 --- 
a/DataModel/QueryBatch.h +++ b/DataModel/QueryBatch.h @@ -2,6 +2,7 @@ #define QUERY_BATCH_H #include +#include #include "ZmqQuery.h" @@ -39,6 +40,9 @@ struct QueryBatch { std::string plotlyplot_batch_err; std::string rootplot_batch_err; + // for debug + void push_time(std::string_view s){ for(auto&& query : queries) query.push_time(s); } + void reset(){ alarm_buffer = "["; devconfig_buffer = "["; diff --git a/DataModel/ZmqQuery.h b/DataModel/ZmqQuery.h index 3b682ac..7c1f598 100644 --- a/DataModel/ZmqQuery.h +++ b/DataModel/ZmqQuery.h @@ -1,6 +1,7 @@ #ifndef ZMQ_QUERY_H #define ZMQ_QUERY_H +#include // for debug #include #include @@ -67,14 +68,14 @@ struct ZmqQuery { // for setting responses of read queries void setresponserows(size_t n_rows){ - printf("ZmqQuery at %p set to %lu response rows\n",this, n_rows); + //printf("ZmqQuery at %p set to %lu response rows\n",this, n_rows); parts.resize(3+n_rows); return; } void setresponse(size_t row_num, std::string_view val){ //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t - printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, val.data(), this); + //printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, val.data(), this); new(&parts[row_num+3]) zmq::message_t(val.size()); // FIXME better way to call zmq_msg_init_size memcpy((void*)parts[row_num+3].data(),val.data(),val.size()); return; @@ -86,15 +87,28 @@ struct ZmqQuery { //zmq_msg_init_size(&parts[row_num+3],row.size()); // mismatch zmq_msg_t* / zmq::message_t // what a mess. 
but only printf bypasses our great overlord's wonderful logging decorations - std::ostringstream oss; - oss << val; - printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, oss.str().c_str(), this); + //std::ostringstream oss; + //oss << val; + //printf("response part %lu set to %s on ZmqQuery at %p\n", row_num, oss.str().c_str(), this); new(&parts[row_num+3]) zmq::message_t(sizeof(val)); // FIXME better way to call zmq_msg_init_size memcpy((void*)parts[row_num+3].data(),&val,sizeof(val)); return; } + // FOR DEBUG + // --------- + std::vector>> times; + void push_time(std::string_view s){ times.emplace_back(s, std::chrono::system_clock::now()); } + void print_times(){ + push_time("reply_send"); + for(size_t i=1; i %s: %u ",times[i-1].first.c_str(), times[i].first.c_str(), std::chrono::duration_cast(times[i].second-times[i-1].second).count()); + } + printf("\n"); + } + // --------- + }; diff --git a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp index 9bb3f1c..f656d8d 100644 --- a/UserTools/DatabaseWorkers/DatabaseWorkers.cpp +++ b/UserTools/DatabaseWorkers/DatabaseWorkers.cpp @@ -61,7 +61,7 @@ bool DatabaseWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.monitoring_vars = &monitoring_vars; thread_args.job_queue = &database_jobqueue; if(!m_data->utils.CreateThread("database_job_distributor", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -117,7 +117,7 @@ bool DatabaseWorkers::Execute(){ // FIXME ok but actually this kills all our jobs, not just our job distributor // so we don't want to do that. 
if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); @@ -130,13 +130,13 @@ bool DatabaseWorkers::Execute(){ bool DatabaseWorkers::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining job distributor thread",v_warning); + Log("Joining job distributor thread",v_warning); m_data->utils.KillThread(&thread_args); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); m_data->num_threads--; // deleting the worker pool manager will kill all the worker threads - Log(m_tool_name+": Joining database worker thread pool",v_warning); + Log("Joining database worker thread pool",v_warning); delete job_manager; job_manager = nullptr; m_data->num_threads--; @@ -144,7 +144,7 @@ bool DatabaseWorkers::Finalise(){ std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -188,7 +188,7 @@ void DatabaseWorkers::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->log_query_queue_mtx); if(!m_args->m_data->log_query_queue.empty()){ std::swap(m_args->m_data->log_query_queue, job_data->logging_queue); - printf("DbJobDistributor grabbed %d log batches\n",job_data->logging_queue.size()); + //printf("DbJobDistributor grabbed %d log batches\n",job_data->logging_queue.size()); } // grab monitoring queries @@ -213,14 +213,14 @@ void DatabaseWorkers::Thread(Thread_args* args){ locker = std::unique_lock(m_args->m_data->write_query_queue_mtx); if(!m_args->m_data->write_query_queue.empty()){ std::swap(m_args->m_data->write_query_queue, job_data->write_queue); - printf("DbJobDistributor grabbed %d write query batches\n",job_data->write_queue.size()); + 
//printf("DbJobDistributor grabbed %d write query batches\n",job_data->write_queue.size()); } // grab read queries locker = std::unique_lock(m_args->m_data->read_msg_queue_mtx); if(!m_args->m_data->read_msg_queue.empty()){ std::swap(m_args->m_data->read_msg_queue, job_data->read_queue); - printf("DbJobDistributor grabbed %d read query batches\n",job_data->read_queue.size()); + //printf("DbJobDistributor grabbed %d read query batches\n",job_data->read_queue.size()); } locker.unlock(); @@ -236,7 +236,7 @@ void DatabaseWorkers::Thread(Thread_args* args){ return; } - printf("DbJobDistributor making db job!\n"); + //printf("DbJobDistributor making db job!\n"); job_data->m_job_name = "database_worker"; m_args->job_queue->AddJob(m_args->the_job); @@ -279,6 +279,9 @@ void DatabaseWorkers::DatabaseJobFail(void*& arg){ std::cerr<m_job_name<<" failure"<monitoring_vars->jobs_failed); + //for(QueryBatch* q : m_args->read_queue) q->push_time("DB_spawn"); + //for(QueryBatch* q : m_args->write_queue) q->push_time("DB_spawn"); + // return our job args to the pool m_args->m_pool->Add(m_args); m_args = nullptr; // clear the local m_args variable... not strictly necessary @@ -292,7 +295,9 @@ void DatabaseWorkers::DatabaseJobFail(void*& arg){ bool DatabaseWorkers::DatabaseJob(void*& arg){ DatabaseJobStruct* m_args = static_cast(arg); - printf("DB worker starting!\n"); + //printf("DB worker starting!\n"); + //for(QueryBatch* q : m_args->read_queue) q->push_time("DB_start"); + //for(QueryBatch* q : m_args->write_queue) q->push_time("DB_start"); // the worker will need a connection to the database thread_local std::unique_ptr conn; @@ -340,10 +345,10 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // XXX we could consider the latter, if it improved performance - the only drawback is we need to // re-sumbit all remaining queries each time one errors, which is more overhead the more we submit. 
pqxx::pipeline* px = new pqxx::pipeline(*tx); - printf("processing %d read query batches\n",m_args->read_queue.size()); + //printf("processing %d read query batches\n",m_args->read_queue.size()); for(QueryBatch* batch : m_args->read_queue){ - printf("pipelining batch of %d read queries\n",batch->queries.size()); + //printf("pipelining batch of %d read queries\n",batch->queries.size()); // if a query in the pipeline fails, all subsequent queries will also fail // so we'll need to go back and re-submit them. @@ -409,9 +414,11 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // ok we're done with the pipeline: close it and detach, whatever that means. px->complete(); + //for(QueryBatch* q : m_args->read_queue) q->push_time("DB_done"); + // might as well pass them out for distribution now if(!m_args->read_queue.empty()){ - printf("returning %d read replies to datamodel\n", m_args->read_queue.size()); + //printf("returning %d read replies to datamodel\n", m_args->read_queue.size()); std::unique_lock locker(m_args->m_data->query_results_mtx); m_args->m_data->query_results.insert(m_args->m_data->query_results.end(), m_args->read_queue.begin(),m_args->read_queue.end()); @@ -444,7 +451,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ for(size_t i=0; ilast_i; ++i){ QueryBatch* batch = m_args->write_queue[i]; - printf("executing %d generic queries for next batch\n",batch->generic_query_indices.size()); + //printf("executing %d generic queries for next batch\n",batch->generic_query_indices.size()); size_t last_j = (m_args->endpoint==DatabaseJobStep::generics) ? m_args->endpoint_j : batch->generic_query_indices.size(); for(size_t j=m_args->checkpoint_j; jqueries[batch->generic_query_indices[j]]; @@ -478,11 +485,11 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // insert new logging statements m_args->last_i = (m_args->endpoint==DatabaseJobStep::logging) ? 
m_args->endpoint_i : m_args->logging_queue.size(); - printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); + //printf("calling prepped for %d logging batches\n",m_args->logging_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_logs.count(i)) continue; std::string* batch = m_args->logging_queue[i]; - printf("dbworker inserting logging batch: '%s'\n",batch->c_str()); + //printf("dbworker inserting logging batch: '%s'\n",batch->c_str()); try { tx->exec(pqxx::prepped{"logging_insert"}, pqxx::params{*batch}); ++(m_args->monitoring_vars->logging_submissions); @@ -508,7 +515,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ m_args->last_i = (m_args->endpoint==DatabaseJobStep::monitoring) ? m_args->endpoint_i : m_args->monitoring_queue.size(); // insert new monitoring statements - printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); + //printf("calling prepped for %d monitoring batches\n",m_args->monitoring_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_mons.count(i)) continue; std::string* batch = m_args->monitoring_queue[i]; @@ -535,7 +542,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ m_args->last_i = (m_args->endpoint==DatabaseJobStep::rootplots) ? m_args->endpoint_i : m_args->rootplot_queue.size(); // insert new multicast rootplot statements - printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); + //printf("calling prepped for %d rootplot batches\n",m_args->rootplot_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_rootplots.count(i)) continue; std::string* batch = m_args->rootplot_queue[i]; @@ -562,7 +569,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ m_args->last_i = (m_args->endpoint==DatabaseJobStep::plotlyplots) ? 
m_args->endpoint_i : m_args->plotlyplot_queue.size(); // insert new multicast plotlyplot statements - printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); + //printf("calling prepped for %d plotlyplot batches\n",m_args->plotlyplot_queue.size()); for(size_t i=0; ilast_i; ++i){ if(m_args->bad_plotlyplots.count(i)) continue; std::string* batch = m_args->plotlyplot_queue[i]; @@ -589,14 +596,14 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ m_args->last_i = (m_args->endpoint==DatabaseJobStep::writes) ? m_args->endpoint_i : m_args->write_queue.size(); // write queries - printf("processing %d write batches\n",m_args->write_queue.size()); + //printf("processing %d write batches\n",m_args->write_queue.size()); for(size_t i=0; ilast_i; ++i){ QueryBatch* batch = m_args->write_queue[i]; // the batch gets split up by WriteWorkers into a buffer for each type of write query // alarm insertions return nothing, just catch errors if(batch->got_alarms() && batch->alarm_batch_err.empty()){ - printf("calling prepped for alarm buffer '%s'\n",batch->alarm_buffer.c_str()); + //printf("calling prepped for alarm buffer '%s'\n",batch->alarm_buffer.c_str()); try { tx->exec(pqxx::prepped{"alarms_insert"}, pqxx::params{batch->alarm_buffer}); ++(m_args->monitoring_vars->alarm_submissions); @@ -620,7 +627,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // device config insertions if(batch->got_devconfigs() && batch->devconfig_batch_err.empty()){ - printf("calling prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); + //printf("calling prepped for dev_config buffer '%s'\n",batch->devconfig_buffer.c_str()); try { tx->for_query(pqxx::prepped{"device_config_insert"}, [&batch](uint16_t new_version_num){ @@ -641,7 +648,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // run config insertions if(batch->got_runconfigs() && batch->runconfig_batch_err.empty()){ - printf("calling prepped for run_config buffer 
'%s'\n",batch->runconfig_buffer.c_str()); + //printf("calling prepped for run_config buffer '%s'\n",batch->runconfig_buffer.c_str()); try { tx->for_query(pqxx::prepped{"run_config_insert"}, [&batch](uint16_t new_version_num){ @@ -662,7 +669,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // calibration data insertions if(batch->got_calibrations() && batch->calibration_batch_err.empty()){ - printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); + //printf("calling prepped for calibration buffer '%s'\n",batch->calibration_buffer.c_str()); try { tx->for_query(pqxx::prepped{"calibration_insert"}, [&batch](uint16_t new_version_num){ @@ -683,7 +690,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // rootplot insertions if(batch->got_rootplots() && batch->rootplot_batch_err.empty()){ - printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); + //printf("calling prepped for rootplots buffer '%s'\n",batch->rootplot_buffer.c_str()); try { tx->for_query(pqxx::prepped{"rootplots_insert"}, [&batch](uint16_t new_version_num){ @@ -704,7 +711,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // plotlyplot insertions if(batch->got_plotlyplots() && batch->plotlyplot_batch_err.empty()){ - printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); + //printf("calling prepped for plotlyplots buffer '%s'\n",batch->plotlyplot_buffer.c_str()); try { tx->for_query(pqxx::prepped{"plotlyplots_insert"}, [&batch](uint16_t new_version_num){ @@ -757,7 +764,7 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // and all insertions to the database before that point (the checkpoint) will have been lost. 
// so loop back to the start and re-run up to the point of last error (endpoint) // this time skipping bad queries to hopefully avoid any errors - printf("%s encountered error, re-running up to checkpoint %d\n",m_args->m_job_name, m_args->endpoint); + //printf("%s encountered error, re-running up to checkpoint %d\n",m_args->m_job_name, m_args->endpoint); m_args->had_error=false; } while(true); // keep trying until we've submitted everything we can. @@ -768,15 +775,17 @@ bool DatabaseWorkers::DatabaseJob(void*& arg){ // committed succesfully, but that's probably easier to handle when uploading the file to DB // e.g. with 'ON CONFLICT' or somesuch + //for(QueryBatch* q : m_args->write_queue) q->push_time("DB_done"); + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers if(!m_args->write_queue.empty()){ - printf("returning %d write acknowledgements to datamodel\n", m_args->write_queue.size()); + //printf("returning %d write acknowledgements to datamodel\n", m_args->write_queue.size()); std::unique_lock locker(m_args->m_data->query_results_mtx); m_args->m_data->query_results.insert(m_args->m_data->query_results.end(), m_args->write_queue.begin(),m_args->write_queue.end()); } - printf("%s completed\n",m_args->m_job_name.c_str()); + //printf("%s completed\n",m_args->m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool diff --git a/UserTools/Monitoring/Monitoring.cpp b/UserTools/Monitoring/Monitoring.cpp index db469af..4118018 100644 --- a/UserTools/Monitoring/Monitoring.cpp +++ b/UserTools/Monitoring/Monitoring.cpp @@ -25,8 +25,10 @@ bool Monitoring::Initialise(std::string configfile, DataModel &data){ thread_args.last_send = std::chrono::steady_clock::now(); thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; + thread_mtx.lock(); + thread_args.thread_mtx = &thread_mtx; if(!m_data->utils.CreateThread("monitoring", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to 
spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -40,7 +42,7 @@ bool Monitoring::Initialise(std::string configfile, DataModel &data){ bool Monitoring::Execute(){ if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. @@ -54,14 +56,17 @@ bool Monitoring::Execute(){ bool Monitoring::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining monitoring thread",v_warning); + Log("Joining monitoring thread",v_warning); + thread_args.running=false; + thread_mtx.unlock(); m_data->utils.KillThread(&thread_args); + Log("thread joined",v_warning); m_data->num_threads--; std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -72,7 +77,7 @@ void Monitoring::Thread(Thread_args* args){ Monitoring_args* m_args = dynamic_cast(args); m_args->last_send = std::chrono::steady_clock::now(); - //printf("Monitoring sending stats\n"); + printf("Monitoring sending stats\n"); std::unique_lock locker(m_args->m_data->monitoring_variables_mtx); @@ -80,12 +85,14 @@ void Monitoring::Thread(Thread_args* args){ std::string s="{\"topic\":\"Monitoring\", \"time\":\"now()\", \"device\":\"middleman\",\"subject\":\""+mon.first+"\", \"data\":"+mon.second->GetJSON()+"}"; - // FIXME or just put into received queue for insertion to DB? 
- std::unique_lock locker(m_args->m_data->out_mon_msg_queue_mtx); + // use multicast so it also not only goes to DB but also shows up on web services + std::unique_lock locker2(m_args->m_data->out_mon_msg_queue_mtx); m_args->m_data->out_mon_msg_queue.push_back(s); } + locker.unlock(); + /* // FIXME calculate rates and stuff, expand monitoring in Tools // to calculate rates we need to know the difference in number @@ -192,7 +199,10 @@ void Monitoring::Thread(Thread_args* args){ */ - std::this_thread::sleep_until(m_args->last_send+m_args->monitoring_period_ms); + //std::this_thread::sleep_until(m_args->last_send+m_args->monitoring_period_ms); + // interruptible sleep - breaks early if Tool unlocks thread_mtx + std::unique_lock timed_locker(*m_args->thread_mtx, std::defer_lock); + timed_locker.try_lock_until(m_args->last_send+m_args->monitoring_period_ms); return; } diff --git a/UserTools/Monitoring/Monitoring.h b/UserTools/Monitoring/Monitoring.h index 62194c0..b480b4e 100644 --- a/UserTools/Monitoring/Monitoring.h +++ b/UserTools/Monitoring/Monitoring.h @@ -5,6 +5,7 @@ #include #include #include +#include #include "Tool.h" #include "DataModel.h" @@ -27,6 +28,7 @@ struct Monitoring_args : public Thread_args { std::chrono::time_point last_send; std::chrono::milliseconds monitoring_period_ms; std::stringstream ss; + std::timed_mutex* thread_mtx; }; @@ -42,6 +44,8 @@ class Monitoring: public Tool { Monitoring_args thread_args; MonitoringMonitoring monitoring_vars; + std::timed_mutex thread_mtx; + static bool ResetStats(bool reset); }; diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index 8df8a6c..6c39675 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -30,7 +30,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data m_variables.Get("type",type_str); 
if(type_str!="logging" && type_str!="monitoring"){ - Log(m_tool_name+": invalid port type '"+type_str+"'; valid values are 'logging' and 'monitoring'",v_error); + Log("invalid port type '"+type_str+"'; valid values are 'logging' and 'monitoring'",v_error); return false; } m_variables.Get("verbose",m_verbose); @@ -55,7 +55,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data socket_handle = socket(AF_INET, SOCK_DGRAM, 0); if(socket_handle<=0){ - Log(m_tool_name+": Failed to open multicast socket with error "+strerror(errno),v_error); + Log(std::string{"Failed to open multicast socket with error "}+strerror(errno),v_error); return false; } @@ -65,7 +65,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data l.l_linger = 0; // seconds to linger for get_ok = setsockopt(socket_handle, SOL_SOCKET, SO_LINGER, (char*) &l, sizeof(l)); if(get_ok!=0){ - Log(m_tool_name+": Failed to set multicast socket linger with error "+strerror(errno),v_error); + Log(std::string{"Failed to set multicast socket linger with error "}+strerror(errno),v_error); return false; } @@ -75,14 +75,14 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data int a =1; get_ok = setsockopt(socket_handle, SOL_SOCKET, SO_REUSEADDR, &a, sizeof(a)); if(get_ok!=0){ - Log(m_tool_name+": Failed to set multicast socket reuseaddr with error "+strerror(errno),v_error); + Log(std::string{"Failed to set multicast socket reuseaddr with error "}+strerror(errno),v_error); return false; } // set the socket to non-blocking mode - should be irrelevant as we poll get_ok = fcntl(socket_handle, F_SETFL, O_NONBLOCK); if(get_ok!=0){ - Log(m_tool_name+": Failed to set multicast socket to non-blocking with error "+strerror(errno),v_warning); + Log(std::string{"Failed to set multicast socket to non-blocking with error "}+strerror(errno),v_warning); } @@ -104,7 +104,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, 
DataModel &data // sending: which multicast group to send to get_ok = inet_aton(multicast_address.c_str(), &addr.sin_addr); if(get_ok==0){ // returns 0 if invalid, unlike other functions - Log(m_tool_name+": Bad multicast address '"+multicast_address+"'",v_error); + Log("Bad multicast address '"+multicast_address+"'",v_error); return false; } @@ -123,7 +123,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data // to listen we need to bind to the socket get_ok = (bind(socket_handle, (struct sockaddr*)&addr, addrlen) == 0); if(!get_ok) { - Log(m_tool_name+": Failed to bind to multicast listen socket",v_error); + Log("Failed to bind to multicast listen socket",v_error); return false; } @@ -132,12 +132,12 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data mreq.imr_interface.s_addr = htonl(INADDR_ANY); get_ok = inet_aton(multicast_address.c_str(), &mreq.imr_multiaddr); if(get_ok==0){ - Log(m_tool_name+": Bad multicast group '"+multicast_address+"'",v_error); + Log("Bad multicast group '"+multicast_address+"'",v_error); return false; } get_ok = setsockopt(socket_handle, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); if(get_ok!=0){ - Log(m_tool_name+": Failed to join multicast group",v_error); + Log("Failed to join multicast group",v_error); return false; } @@ -175,7 +175,7 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data // thread needs a unique name printf("spawning %s send/receiver thread\n",type_str.c_str()); if(!m_data->utils.CreateThread(type_str+"_sendreceiver", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -187,15 +187,34 @@ bool MulticastReceiverSender::Initialise(std::string configfile, DataModel &data bool MulticastReceiverSender::Execute(){ if(!thread_args.running){ - Log(m_tool_name+" Execute found 
thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? // FIXME if restarts > X times in last Y mins, alarm (bypass, shove into DB? send to websocket?) and StopLoop. ++(monitoring_vars.thread_crashes); } + + // Hmmm, throttling of the main thread is specified in the Sleep tool, but that's fairly short + // that means these variables are being updated thousands of times a second. + // Pro: monitoring info is up-to-date when it goes out + // Con: wasteful... + // FIX: update 1/10th monitoring interval? synchronise with Monitoring Tool? monitoring_vars.Set("buffered_in_messages",thread_args.in_local_queue->size()); monitoring_vars.Set("waiting_out_messages",thread_args.out_local_queue.size()); - //monitoring_vars.Set("last_transfer",thread_args.last_transfer); // FIXME FIXME FIXME need cast to string + + /* + actually we can't do this. steady_clock is what we want for regular tasks, + but cannot be converted to a meaningful time unless we manually keep some + reference time for conversion. Even then, it may drift as, by definition, + it does not necessarily stay in sync with system_clock. Not sure what to do about that! + // aren't you glad we have std::chrono to avoid all that c-style time jank? 
+ time_t lt = thread_args.last_transfer.time_since_epoch().count(); + struct tm lt_s; + localtime_r(<, <_s); + char tbuf[50]; + strftime(tbuf, 50, "%F %T%z",<_s); + monitoring_vars.Set("last_transfer",tbuf); + */ return true; } @@ -204,8 +223,7 @@ bool MulticastReceiverSender::Execute(){ bool MulticastReceiverSender::Finalise(){ // signal background receiver thread to stop - //Log(m_tool_name+": Joining receiver thread",v_warning); - printf("joining %s receiver thread\n",m_tool_name.c_str()); + //Log("Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); m_data->num_threads--; @@ -224,7 +242,7 @@ bool MulticastReceiverSender::Finalise(){ if(socket_handle>0){ get_ok = close(socket_handle); if(get_ok!=0){ - Log(m_tool_name+": Error closing socket "+strerror(errno),v_error); + Log(std::string{"Error closing socket "}+strerror(errno),v_error); return false; } } @@ -232,7 +250,7 @@ bool MulticastReceiverSender::Finalise(){ locker = std::unique_lock(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -247,7 +265,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ ((m_args->in_local_queue->size()>m_args->local_buffer_size) || (std::chrono::steady_clock::now() - m_args->last_transfer) > m_args->transfer_period_ms) ){ - printf("adding %d %s messages to datamodel\n",m_args->in_local_queue->size(), m_args->m_tool_name.c_str()); + //printf("adding %d %s messages to datamodel\n",m_args->in_local_queue->size(), m_args->m_tool_name.c_str()); std::unique_lock locker(*m_args->in_queue_mtx); m_args->in_queue->push_back(m_args->in_local_queue); @@ -289,7 +307,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - printf("%s receiving message\n",m_args->m_tool_name.c_str()); + //printf("%s receiving message\n",m_args->m_tool_name.c_str()); // read the messge 
m_args->get_ok = recvfrom(m_args->socket, m_args->message, MAX_UDP_PACKET_SIZE, 0, (struct sockaddr*)&m_args->addr, &m_args->addrlen); @@ -303,7 +321,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ } else { ++(m_args->monitoring_vars->msgs_rcvd); - //m_data->Log(m_tool_name+": Received multicast message '"+std::string(m_args->message) + //m_data->Log("Received multicast message '"+std::string(m_args->message) // +"' from "+std::string{inet_ntoa(&m_args->addr->sin_addr)},12); m_args->in_local_queue->emplace_back(m_args->message); @@ -325,7 +343,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // check success if(cnt < 0){ - //m_data->Log(m_tool_name+": Error sending multicast message: "+strerror(errno),v_error); // FIXME ensure this isn't circular + //m_data->Log("Error sending multicast message: "+strerror(errno),v_error); // FIXME ensure this isn't circular ++(m_args->monitoring_vars->send_fails); } else { ++(m_args->monitoring_vars->msgs_sent); diff --git a/UserTools/MulticastWorkers/MulticastWorkers.cpp b/UserTools/MulticastWorkers/MulticastWorkers.cpp index 9aa2e41..f0cd1db 100644 --- a/UserTools/MulticastWorkers/MulticastWorkers.cpp +++ b/UserTools/MulticastWorkers/MulticastWorkers.cpp @@ -30,7 +30,7 @@ bool MulticastWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.monitoring_vars = &monitoring_vars; // thread needs a unique name if(!m_data->utils.CreateThread("multicast_job_distributor", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -43,7 +43,7 @@ bool MulticastWorkers::Execute(){ // FIXME ok but actually this kills all our jobs, not just our job distributor // so we don't want to do that. 
if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); @@ -55,7 +55,7 @@ bool MulticastWorkers::Execute(){ bool MulticastWorkers::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining receiver thread",v_warning); + Log("Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); m_data->num_threads--; @@ -65,7 +65,7 @@ bool MulticastWorkers::Finalise(){ std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -80,6 +80,7 @@ void MulticastWorkers::Thread(Thread_args* args){ if(!m_args->m_data->in_multicast_msg_queue.empty()){ std::swap(m_args->m_data->in_multicast_msg_queue, m_args->local_msg_queue); } else { + locker.unlock(); usleep(100); return; } @@ -118,7 +119,7 @@ void MulticastWorkers::Thread(Thread_args* args){ the_job->fail_func = MulticastMessageFail; //multicast_jobs.AddJob(the_job); - printf("spawning new multicastjob for %d messages\n",job_data->msg_buffer->size()); + //printf("spawning new multicastjob for %d messages\n",job_data->msg_buffer->size()); m_args->m_data->job_queue.AddJob(the_job); } @@ -184,7 +185,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ // subsequently, all we need to do here is concatenate the JSONs - printf("%s processing %d batches\n",m_args->m_job_name.c_str(), m_args->msg_buffer->size()); + //printf("%s processing %d batches\n",m_args->m_job_name.c_str(), m_args->msg_buffer->size()); *m_args->logging_buffer = "["; *m_args->monitoring_buffer = "["; @@ -226,7 +227,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ if(m_args->out_buffer->length()>1) (*m_args->out_buffer) += 
", "; (*m_args->out_buffer) += next_msg; - printf("%s added message '%s'\n",m_args->m_job_name.c_str(), next_msg.c_str()); + //printf("%s added message '%s'\n",m_args->m_job_name.c_str(), next_msg.c_str()); ++(m_args->monitoring_vars->msgs_processed); @@ -237,7 +238,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ *m_args->logging_buffer += "]"; std::unique_lock locker(m_args->m_data->log_query_queue_mtx); m_args->m_data->log_query_queue.push_back(m_args->logging_buffer); - printf("%s adding '%s' to logging buffer\n",m_args->m_job_name.c_str(), m_args->logging_buffer->c_str()); + //printf("%s adding '%s' to logging buffer\n",m_args->m_job_name.c_str(), m_args->logging_buffer->c_str()); } if(m_args->monitoring_buffer->length()!=1){ @@ -262,7 +263,7 @@ bool MulticastWorkers::MulticastMessageJob(void*& arg){ m_args->msg_buffer->clear(); m_args->m_data->multicast_buffer_pool.Add(m_args->msg_buffer); - printf("%s job completed\n",m_args->m_job_name.c_str()); + //printf("%s job completed\n",m_args->m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); m_args->m_pool->Add(m_args); // return our job args to the job args struct pool diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp index efcb0e2..32023dd 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.cpp @@ -54,7 +54,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel managed_socket->socket->setsockopt(ZMQ_BACKLOG,conns_backlog); managed_socket->socket->setsockopt(ZMQ_LINGER, 10); // make reply socket error, rather than silently drop, if the destination is unreachable - managed_socket->socket->setsockopt(ZMQ_ROUTER_MANDATORY, 1); // FIXME do we want this? 
+ managed_socket->socket->setsockopt(ZMQ_ROUTER_MANDATORY, 1); // make router transfer connections with an already seen ZMQ_IDENTITY to a new connection // rather than rejecting the new connection attempt // FIXME need to update ZMQ version to enable, but we should do this @@ -82,8 +82,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel thread_args.m_data = m_data; thread_args.m_tool_name = m_tool_name; thread_args.monitoring_vars = &monitoring_vars; - thread_args.socket = managed_socket->socket; - thread_args.socket_mtx = &managed_socket->socket_mtx; + thread_args.mgd_sock = managed_socket; thread_args.poll_timeout_ms = poll_timeout_ms; thread_args.in_poll = zmq::pollitem_t{*managed_socket->socket,0,ZMQ_POLLIN,0}; thread_args.out_poll = zmq::pollitem_t{*managed_socket->socket,0,ZMQ_POLLOUT,0}; @@ -95,7 +94,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel // thread needs a unique name if(!m_data->utils.CreateThread("readrep_sendreceiver", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -107,7 +106,7 @@ bool ReadQueryReceiverReplySender::Initialise(std::string configfile, DataModel bool ReadQueryReceiverReplySender::Execute(){ if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
++(monitoring_vars.thread_crashes); @@ -121,9 +120,9 @@ bool ReadQueryReceiverReplySender::Execute(){ bool ReadQueryReceiverReplySender::Finalise(){ // signal background receiver thread to stop - Log(m_tool_name+": Joining receiver thread",v_warning); + Log("Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - std::cerr<<"ReadReceiver thread terminated"<num_threads--; std::unique_lock locker(m_data->managed_sockets_mtx); @@ -138,7 +137,7 @@ bool ReadQueryReceiverReplySender::Finalise(){ locker = std::unique_lock(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -157,8 +156,9 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); - printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); + //printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); + //m_args->in_local_queue->push_time("receiver_to_DM"); std::unique_lock locker(m_args->m_data->read_msg_queue_mtx); m_args->m_data->read_msg_queue.push_back(m_args->in_local_queue); locker.unlock(); @@ -174,11 +174,16 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ } + // poll // ==== try { m_args->get_ok=0; - std::unique_lock locker(*m_args->socket_mtx); + // give priority to socket manager, otherwise we may lock it too frequently and prevent it getting access + while(m_args->mgd_sock->socket_manager_request){ + usleep(1); + } + std::unique_lock locker(m_args->mgd_sock->socket_mtx); m_args->get_ok = zmq::poll(&m_args->in_poll, 1, m_args->poll_timeout_ms); } catch(zmq::error_t& err){ // ignore poll aborting due to signals @@ -186,30 +191,34 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ std::cerr<m_tool_name<<" in poll caught 
"<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + usleep(10); return; } catch(std::exception& err){ std::cerr<m_tool_name<<" in poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + usleep(10); return; } catch(...){ std::cerr<m_tool_name<<" in poll caught "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + usleep(10); return; } if(m_args->get_ok<0){ std::cerr<m_tool_name<<" in poll failed with "<monitoring_vars->polls_failed); // m_args->running=false; // FIXME Handle other errors? or just globally via restarting thread? or throw? + usleep(10); return; } // read // ==== if(m_args->in_poll.revents & ZMQ_POLLIN){ - printf("%s receiving message\n",m_args->m_tool_name.c_str()); + //printf("%s receiving message\n",m_args->m_tool_name.c_str()); if(m_args->make_new){ m_args->in_local_queue->queries.emplace_back(); @@ -222,16 +231,20 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ static constexpr char part_order[4] = {0,2,1,3}; m_args->msg_parts=0; + // debug only, remove + //msg_buf.times.clear(); + //msg_buf.push_time("recieve"); + try { - std::unique_lock locker(*m_args->socket_mtx); - printf("%s receiving part...",m_args->m_tool_name.c_str()); + std::unique_lock locker(m_args->mgd_sock->socket_mtx); + //printf("%s receiving part...",m_args->m_tool_name.c_str()); do { - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); - printf("%d=%d (more: %d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); + m_args->get_ok = m_args->mgd_sock->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); + //printf("%d=%d (more: 
%d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); } while(m_args->get_ok && msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); locker.unlock(); - printf("\n"); + //printf("\n"); // if the read failed, discard the message if(!m_args->get_ok){ @@ -248,9 +261,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ snprintf(&msg_str[0], msg_buf[part_order[i]].size()+1, "%s", msg_buf[part_order[i]].data()); printf("\tpart %d: %s\n",i, msg_str); } - // FIXME print other info we have (client, message, parts) to help identify culprit - // FIXME do we do this? for efficiency? here? do we add a flag for bad and do it in the processing? - // FIXME do we try to make a query out of the first 4 parts? i'm gonna say no, for now + // FIXME Log this? here? do we add a flag for bad and do it in the processing? ++(m_args->monitoring_vars->bad_msgs); // else success @@ -258,7 +269,8 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ m_args->make_new=true; ++(m_args->monitoring_vars->msgs_rcvd); - printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p, %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf, &msg_buf.parts[3]); + // XXX + //printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf); } @@ -292,7 +304,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // ==== try { m_args->get_ok=0; - std::unique_lock locker(*m_args->socket_mtx); + std::unique_lock locker(m_args->mgd_sock->socket_mtx); m_args->get_ok = zmq::poll(&m_args->out_poll, 1, m_args->poll_timeout_ms); } catch(zmq::error_t& err){ // ignore poll aborting due to signals @@ -323,21 +335,34 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // check we had a listener ready if(m_args->out_poll.revents & ZMQ_POLLOUT){ - 
printf("%s sending reply %d/%d\n",m_args->m_tool_name.c_str(),m_args->out_i,m_args->out_local_queue->queries.size()); // FIXME better logging + //printf("%s sending reply %d/%d\n",m_args->m_tool_name.c_str(),m_args->out_i,m_args->out_local_queue->queries.size()); ZmqQuery& rep = m_args->out_local_queue->queries[m_args->out_i++]; // FIXME maybe don't pop (increment out_i) until send succeeds? // FIXME maybe impelement 'retries' mechanism as previously? - printf("reply to message %u has %d parts\n", rep.msg_id(), rep.size()); + + // response parts are [client,msg_id, success, results...] + + //printf("reply to message %u has %d parts\n", rep.msg_id(), rep.size()); + /* + uint32_t turnaround = std::chrono::duration_cast(std::chrono::system_clock::now()-rep.times[0].second).count(); + if(rep.size()>3){ + printf("%s turnaround of %u ms on response '%s' to message %u\n",m_args->m_tool_name.c_str(), turnaround, rep[3].data(), *(uint32_t*)(rep[1].data())); + } else { + printf("%s turnaround of %u ms on ack %u to message %u\n",m_args->m_tool_name.c_str(), turnaround, *(uint32_t*)rep[2].data(), *(uint32_t*)(rep[1].data())); + } + rep.print_times(); + */ + try { - std::unique_lock locker(*m_args->socket_mtx); + std::unique_lock locker(m_args->mgd_sock->socket_mtx); for(size_t i=0; iget_ok = m_args->socket->send(rep[i], ZMQ_SNDMORE); + m_args->get_ok = m_args->mgd_sock->socket->send(rep[i], ZMQ_SNDMORE); if(!m_args->get_ok) break; } - if(m_args->get_ok) m_args->get_ok = m_args->socket->send(rep[rep.size()-1]); + if(m_args->get_ok) m_args->get_ok = m_args->mgd_sock->socket->send(rep[rep.size()-1]); locker.unlock(); if(!m_args->get_ok){ @@ -356,7 +381,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ rep.parts.resize(0); // safety to prevent accidentally accessing sent messages, which can segfault // else success - printf("%s reply at %p sent\n",m_args->m_tool_name.c_str(), &rep); + //printf("%s reply at %p sent\n",m_args->m_tool_name.c_str(), &rep); 
++(m_args->monitoring_vars->msgs_sent); } catch(zmq::error_t& err){ @@ -383,7 +408,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ std::unique_lock locker(m_args->m_data->query_replies_mtx); if(!m_args->m_data->query_replies.empty()){ - printf("%s fetching new replies\n",m_args->m_tool_name.c_str()); + //printf("%s fetching new replies\n",m_args->m_tool_name.c_str()); // return our batch to the pool if applicable if(m_args->out_local_queue!=nullptr){ @@ -394,6 +419,7 @@ void ReadQueryReceiverReplySender::Thread(Thread_args* args){ // grab a new batch m_args->out_local_queue = m_args->m_data->query_replies.front(); + //m_args->out_local_queue->push_time("reply_fetch"); m_args->m_data->query_replies.pop_front(); ++(m_args->monitoring_vars->out_buffer_transfers); diff --git a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h index 72998d8..091f78d 100644 --- a/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h +++ b/UserTools/ReadQueryReceiverReplySender/ReadQueryReceiverReplySender.h @@ -22,8 +22,7 @@ struct ReadQueryReceiverReplySender_args : public Thread_args { std::string m_tool_name; DataModel* m_data; ReadReceiveMonitoring* monitoring_vars; - zmq::socket_t* socket=nullptr; - std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients + ManagedSocket* mgd_sock=nullptr; int poll_timeout_ms; zmq::pollitem_t in_poll; diff --git a/UserTools/ResultWorkers/ResultWorkers.cpp b/UserTools/ResultWorkers/ResultWorkers.cpp index a2ee797..6f6e5e2 100644 --- a/UserTools/ResultWorkers/ResultWorkers.cpp +++ b/UserTools/ResultWorkers/ResultWorkers.cpp @@ -21,7 +21,7 @@ bool ResultWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; if(!m_data->utils.CreateThread("result_job_distributor", &Thread, &thread_args)){ - 
Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -35,7 +35,7 @@ bool ResultWorkers::Execute(){ // FIXME ok but actually this kills all our jobs, not just our job distributor // so we don't want to do that. if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); @@ -48,14 +48,14 @@ bool ResultWorkers::Execute(){ bool ResultWorkers::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining receiver thread",v_warning); + Log("Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); m_data->num_threads--; std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -67,6 +67,7 @@ void ResultWorkers::Thread(Thread_args* args){ // grab a batch of read queries, with results awaiting conversion std::unique_lock locker(m_args->m_data->query_results_mtx); if(m_args->m_data->query_results.empty()){ + locker.unlock(); usleep(100); return; } @@ -97,6 +98,8 @@ void ResultWorkers::Thread(Thread_args* args){ job_data->batch = m_args->local_msg_queue[i]; job_data->m_job_name = "result_worker"; + //job_data->batch->push_time("result_job_push"); + m_args->m_data->job_queue.AddJob(the_job); } @@ -133,6 +136,7 @@ void ResultWorkers::ResultJobFail(void*& arg){ bool ResultWorkers::ResultJob(void*& arg){ ResultJobStruct* m_args = reinterpret_cast(arg); + //m_args->batch->push_time("result_worker_start"); // for now each job processes a batch, not a set of batches //for(QueryBatch* batch : m_args->local_msg_queue){ @@ -325,6 +329,8 @@ 
bool ResultWorkers::ResultJob(void*& arg){ } // if/else on whether this batch was read/write + //m_args->batch->push_time("result_done"); + // } // loop over query batches // pass the batch onto the next stage of the pipeline for the DatabaseWorkers @@ -334,7 +340,7 @@ bool ResultWorkers::ResultJob(void*& arg){ m_args->m_data->query_replies.push_back(m_args->batch); locker.unlock(); - printf("%s completed\n",m_args->m_job_name.c_str()); + //printf("%s completed\n",m_args->m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool diff --git a/UserTools/SocketManager/SocketManager.cpp b/UserTools/SocketManager/SocketManager.cpp index 8cf20b9..dbd6a0a 100644 --- a/UserTools/SocketManager/SocketManager.cpp +++ b/UserTools/SocketManager/SocketManager.cpp @@ -29,9 +29,11 @@ bool SocketManager::Initialise(std::string configfile, DataModel &data){ thread_args.daq_utils = &daq_utils; thread_args.update_period_ms = std::chrono::milliseconds{update_ms}; thread_args.last_update = std::chrono::steady_clock::now(); + thread_mtx.lock(); + thread_args.thread_mtx = &thread_mtx; if(!m_data->utils.CreateThread("socket_manager", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -45,7 +47,7 @@ bool SocketManager::Initialise(std::string configfile, DataModel &data){ bool SocketManager::Execute(){ if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? 
++(monitoring_vars.thread_crashes); @@ -58,14 +60,16 @@ bool SocketManager::Execute(){ bool SocketManager::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining socket manager thread",v_warning); + Log("Joining socket manager thread",v_warning); + thread_args.running=false; + thread_mtx.unlock(); m_data->utils.KillThread(&thread_args); m_data->num_threads--; std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -73,8 +77,8 @@ void SocketManager::Thread(Thread_args* args){ SocketManager_args* m_args = dynamic_cast(args); + //printf("SocketManager checking for new clients after %lu ms\n",std::chrono::duration_cast(std::chrono::steady_clock::now()-m_args->last_update).count()); m_args->last_update = std::chrono::steady_clock::now(); - //printf("SocketManager checking for new clients\n"); bool new_clients=false; @@ -83,12 +87,18 @@ void SocketManager::Thread(Thread_args* args){ ManagedSocket* sock = mgd_sock.second; - std::unique_lock locker(sock->socket_mtx); + std::unique_lock locker(sock->socket_mtx, std::defer_lock); + if(!locker.try_lock()){ + sock->socket_manager_request=true; + locker.lock(); + sock->socket_manager_request=false; + } + int new_conn_count = std::abs((long long int)sock->connections.size() - m_args->daq_utils->UpdateConnections(sock->service_name, sock->socket, sock->connections, "", sock->remote_port_name)); locker.unlock(); if(new_conn_count!=0){ - //m_args->m_data->services->SendLog(m_tool_name+": "+std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); // FIXME logging + //m_args->m_data->services->SendLog(std::to_string(std::abs(new_conn_count))+" new connections to "+sock->service_name, v_message); // FIXME logging printf("%d new %s connections made!\n",new_conn_count, sock->remote_port_name.c_str()); new_clients = true; @@ -121,7 
+131,9 @@ void SocketManager::Thread(Thread_args* args){ } - std::this_thread::sleep_until(m_args->last_update+m_args->update_period_ms); + //std::this_thread::sleep_until(m_args->last_update+m_args->update_period_ms); + std::unique_lock timed_locker(*m_args->thread_mtx, std::defer_lock); + timed_locker.try_lock_until(m_args->last_update+m_args->update_period_ms); return; diff --git a/UserTools/SocketManager/SocketManager.h b/UserTools/SocketManager/SocketManager.h index 5af5dd9..77abdc7 100644 --- a/UserTools/SocketManager/SocketManager.h +++ b/UserTools/SocketManager/SocketManager.h @@ -1,6 +1,7 @@ #ifndef SocketManager_H #define SocketManager_H +#include #include #include "Tool.h" @@ -27,6 +28,8 @@ struct SocketManager_args : public Thread_args { std::chrono::time_point last_update; std::chrono::milliseconds update_period_ms; + std::timed_mutex* thread_mtx; + }; class SocketManager: public Tool { @@ -43,6 +46,8 @@ class SocketManager: public Tool { SocketManager_args thread_args; SocketManagerMonitoring monitoring_vars; + std::timed_mutex thread_mtx; + }; diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp index d87d524..f9eb39b 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.cpp @@ -73,8 +73,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.m_tool_name = m_tool_name; thread_args.monitoring_vars = &monitoring_vars; - thread_args.socket = managed_socket->socket; - thread_args.socket_mtx = &managed_socket->socket_mtx; // for sharing socket with SocketManager + thread_args.mgd_sock = managed_socket; thread_args.poll_timeout_ms = poll_timeout_ms; thread_args.poll = zmq::pollitem_t{*managed_socket->socket, 0, ZMQ_POLLIN, 0}; thread_args.in_local_queue = m_data->querybatch_pool.GetNew(local_buffer_size); @@ -85,7 +84,7 @@ bool 
WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ // thread needs a unique name if(!m_data->utils.CreateThread("write_query_receiver", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -96,7 +95,7 @@ bool WriteQueryReceiver::Initialise(std::string configfile, DataModel &data){ bool WriteQueryReceiver::Execute(){ if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); @@ -117,9 +116,9 @@ bool WriteQueryReceiver::Execute(){ bool WriteQueryReceiver::Finalise(){ // signal background receiver thread to stop - Log(m_tool_name+": Joining receiver thread",v_warning); + Log("Joining receiver thread",v_warning); m_data->utils.KillThread(&thread_args); - std::cerr<<"WriteReceiver thread terminated"<num_threads--; if(m_data->managed_sockets.count(remote_port_name)){ @@ -134,7 +133,7 @@ bool WriteQueryReceiver::Finalise(){ std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -151,7 +150,9 @@ void WriteQueryReceiver::Thread(Thread_args* args){ if(!m_args->make_new) m_args->in_local_queue->queries.pop_back(); - printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); + //printf("%s adding %ld messages to datamodel\n",m_args->m_tool_name.c_str(),m_args->in_local_queue->queries.size()); + + //m_args->in_local_queue->push_time("receiver_to_DM"); std::unique_lock locker(m_args->m_data->write_msg_queue_mtx); 
m_args->m_data->write_msg_queue.push_back(m_args->in_local_queue); @@ -171,8 +172,11 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // poll // ==== try { - - std::unique_lock locker(*m_args->socket_mtx); + // give priority to socketmanager + while(m_args->mgd_sock->socket_manager_request){ + usleep(1); + } + std::unique_lock locker(m_args->mgd_sock->socket_mtx); m_args->get_ok = zmq::poll(&m_args->poll, 1, m_args->poll_timeout_ms); if(m_args->get_ok<0){ @@ -204,7 +208,7 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // read // ==== if(m_args->poll.revents & ZMQ_POLLIN){ - printf("%s receiving message\n",m_args->m_tool_name.c_str()); + //printf("%s receiving message\n",m_args->m_tool_name.c_str()); if(m_args->make_new){ m_args->in_local_queue->queries.emplace_back(); @@ -217,16 +221,20 @@ void WriteQueryReceiver::Thread(Thread_args* args){ static constexpr char part_order[4] = {2,0,1,3}; m_args->msg_parts=0; + // for debug only + //msg_buf.times.clear(); + //msg_buf.push_time("receive"); + try { - std::unique_lock locker(*m_args->socket_mtx); - printf("%s receiving part...",m_args->m_tool_name.c_str()); + std::unique_lock locker(m_args->mgd_sock->socket_mtx); + //printf("%s receiving part...",m_args->m_tool_name.c_str()); do { - m_args->get_ok = m_args->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); - printf("%d=%d (more: %d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); + m_args->get_ok = m_args->mgd_sock->socket->recv(&msg_buf[part_order[std::min(3,m_args->msg_parts++)]]); + //printf("%d=%d (more: %d),...",m_args->msg_parts,m_args->get_ok,msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); } while(m_args->get_ok && msg_buf[part_order[std::min(3,m_args->msg_parts-1)]].more()); locker.unlock(); - printf("\n"); + //printf("\n"); // if receive failed, discard the message if(!m_args->get_ok){ @@ -255,7 +263,8 @@ void WriteQueryReceiver::Thread(Thread_args* args){ // 
else success m_args->make_new=true; ++(m_args->monitoring_vars->msgs_rcvd); - printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p, %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf, &msg_buf.parts[3]); + // XXX + //printf("%s received query %u, '%s' message '%s' into ZmqQuery at %p\n",m_args->m_tool_name.c_str(), msg_buf.msg_id(), msg_buf.topic().data(), msg_buf.msg().data(), &msg_buf); } catch(zmq::error_t& err){ // receive aborted due to signals? diff --git a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h index 4991d25..59633cc 100644 --- a/UserTools/WriteQueryReceiver/WriteQueryReceiver.h +++ b/UserTools/WriteQueryReceiver/WriteQueryReceiver.h @@ -24,8 +24,7 @@ struct WriteQueryReceiver_args : public Thread_args { std::string m_tool_name; DataModel* m_data; WriteReceiveMonitoring* monitoring_vars; - zmq::socket_t* socket=nullptr; - std::mutex* socket_mtx; // for sharing the socket with ServicesManager Tool for finding clients + ManagedSocket* mgd_sock=nullptr; int poll_timeout_ms; zmq::pollitem_t poll; diff --git a/UserTools/WriteWorkers/WriteWorkers.cpp b/UserTools/WriteWorkers/WriteWorkers.cpp index a532eae..a74ffe0 100644 --- a/UserTools/WriteWorkers/WriteWorkers.cpp +++ b/UserTools/WriteWorkers/WriteWorkers.cpp @@ -21,7 +21,7 @@ bool WriteWorkers::Initialise(std::string configfile, DataModel &data){ thread_args.m_data = m_data; thread_args.monitoring_vars = &monitoring_vars; if(!m_data->utils.CreateThread("write_job_distributor", &Thread, &thread_args)){ - Log(m_tool_name+": Failed to spawn background thread",v_error,m_verbose); + Log("Failed to spawn background thread",v_error,m_verbose); return false; } m_data->num_threads++; @@ -35,7 +35,7 @@ bool WriteWorkers::Execute(){ // FIXME ok but actually this kills all our jobs, not just our job distributor // so we don't want to do that. 
if(!thread_args.running){ - Log(m_tool_name+" Execute found thread not running!",v_error); + Log("Execute found thread not running!",v_error); Finalise(); Initialise(m_configfile, *m_data); // FIXME should we give up if Initialise returns false? should we set StopLoop to 1? ++(monitoring_vars.thread_crashes); @@ -48,14 +48,14 @@ bool WriteWorkers::Execute(){ bool WriteWorkers::Finalise(){ // signal job distributor thread to stop - Log(m_tool_name+": Joining job distributor thread",v_warning); + Log("Joining job distributor thread",v_warning); m_data->utils.KillThread(&thread_args); m_data->num_threads--; std::unique_lock locker(m_data->monitoring_variables_mtx); m_data->monitoring_variables.erase(m_tool_name); - Log(m_tool_name+": Finished",v_warning); + Log("Finished",v_warning); return true; } @@ -70,6 +70,7 @@ void WriteWorkers::Thread(Thread_args* args){ if(!m_args->m_data->write_msg_queue.empty()){ std::swap(m_args->m_data->write_msg_queue, m_args->local_msg_queue); } else { + locker.unlock(); usleep(100); return; } @@ -99,11 +100,12 @@ void WriteWorkers::Thread(Thread_args* args){ job_data->local_msg_queue = m_args->local_msg_queue[i]; job_data->m_job_name = "write_worker"; - printf("spawning %s job\n", job_data->m_job_name.c_str()); + //printf("spawning %s job\n", job_data->m_job_name.c_str()); the_job->func = WriteMessageJob; the_job->fail_func = WriteMessageFail; m_args->m_data->job_queue.AddJob(the_job); + //job_data->local_msg_queue->push_time("write_job_push"); } @@ -151,7 +153,9 @@ bool WriteWorkers::WriteMessageJob(void*& arg){ WriteJobStruct* m_args = static_cast(arg); - printf("%s job processing %d queries\n", m_args->m_job_name.c_str(), m_args->local_msg_queue->queries.size()); + //m_args->local_msg_queue->push_time("writeworker_start"); + + //printf("%s job processing %d queries\n", m_args->m_job_name.c_str(), m_args->local_msg_queue->queries.size()); m_args->local_msg_queue->reset(); @@ -212,12 +216,14 @@ bool WriteWorkers::WriteMessageJob(void*& 
arg){ // add closing ']' to any batch queries m_args->local_msg_queue->close(); + //m_args->local_msg_queue->push_time("writeworker_done"); + // pass the batch onto the next stage of the pipeline for the DatabaseWorkers std::unique_lock locker(m_args->m_data->write_query_queue_mtx); m_args->m_data->write_query_queue.push_back(m_args->local_msg_queue); locker.unlock(); - printf("%s queueing processed querybatch\n",m_args->m_job_name.c_str()); + //printf("%s queueing processed querybatch\n",m_args->m_job_name.c_str()); ++(m_args->monitoring_vars->jobs_completed); // return our job args to the pool From c06394cb142d12f625d633aad1470499d4d8cbde Mon Sep 17 00:00:00 2001 From: marcus Date: Wed, 4 Feb 2026 13:34:13 +0000 Subject: [PATCH 12/12] bugfix; clear out multicast queue after sending --- UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp index 6c39675..5f1add3 100644 --- a/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp +++ b/UserTools/MulticastReceiverSender/MulticastReceiverSender.cpp @@ -355,6 +355,7 @@ void MulticastReceiverSender::Thread(Thread_args* arg){ // else see if there are any in datamodel to grab std::unique_lock locker(*m_args->out_queue_mtx); if(!m_args->out_queue->empty()){ + m_args->out_local_queue.clear(); //printf("%s fetching new outgoing messages\n",m_args->m_tool_name.c_str()); std::swap(*m_args->out_queue, m_args->out_local_queue); ++(m_args->monitoring_vars->out_buffer_transfers);