diff --git a/physiolabxr/_ui/DSI24_Options.ui b/physiolabxr/_ui/DSI24_Options.ui
index 5483e121..b73a8d49 100644
--- a/physiolabxr/_ui/DSI24_Options.ui
+++ b/physiolabxr/_ui/DSI24_Options.ui
@@ -39,21 +39,442 @@
-
-
+
Check Impedance
-
-
+
+
+
+
+ 330
+ 0
+ 49
+ 16
+
+
+
+
+ 100
+ 100
+
+
+
+ background-color: gray;
+ color: black;
+
+
+
+ FP1
+
+
+
+
+
+ 450
+ 0
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ FP2
+
+
+
+
+
+ 230
+ 20
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ F7
+
+
+
+
+
+ 300
+ 50
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ F3
+
+
+
+
+
+ 380
+ 70
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ FZ
+
+
+
+
+
+ 540
+ 20
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ F8
+
+
+
+
+
+ 460
+ 50
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ F4
+
+
+
+
+
+ 320
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ C3
+
+
+
+
+
+ 380
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ CZ
+
+
+
+
+
+ 440
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ C4
+
+
+
+
+
+ 510
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ T4
+
+
+
+
+
+ 250
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ T3
+
+
+
+
+
+ 240
+ 180
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ T5
+
+
+
+
+
+ 310
+ 160
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ P3
+
+
+
+
+
+ 380
+ 140
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ Pz CMF
+
+
+
+
+
+ 470
+ 160
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ P4
+
+
+
+
+
+ 540
+ 180
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ T6
+
+
+
+
+
+ 330
+ 200
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ O1
+
+
+
+
+
+ 450
+ 200
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ O2
+
+
+
+
+
+ 190
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ A1
+
+
+
+
+
+ 600
+ 100
+ 49
+ 16
+
+
+
+ background-color: gray;
+ color: black;
+
+
+ A2
+
+
+
+
+
+ 20
+ 10
+ 111
+ 16
+
+
+
+ Green: Good
+
+
+
+
+
+ 20
+ 30
+ 91
+ 16
+
+
+
+ Yellow: Ok
+
+
+
+
+
+ 20
+ 50
+ 101
+ 16
+
+
+
+ Red: Bad
+
+
+
+
+ -
+
-
-
+
Battery Level
+ -
+
+
+ 24
+
+
+
+ -
+
+
+ 24
+
+
+
diff --git a/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample.proto b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample.proto
new file mode 100644
index 00000000..14e9dfd6
--- /dev/null
+++ b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+import "google/protobuf/empty.proto";
+service RPCExample {
+ rpc ExampleOneArgOneReturn(ExampleOneArgOneReturnRequest) returns (ExampleOneArgOneReturnResponse);
+ rpc TestRPCNoArgs(google.protobuf.Empty) returns (TestRPCNoArgsResponse);
+ rpc TestRPCNoInputNoReturn(google.protobuf.Empty) returns (google.protobuf.Empty);
+ rpc TestRPCNoReturn(TestRPCNoReturnRequest) returns (google.protobuf.Empty);
+ rpc TestRPCTwoArgTwoReturn(TestRPCTwoArgTwoReturnRequest) returns (TestRPCTwoArgTwoReturnResponse);
+}
+message ExampleOneArgOneReturnRequest {
+ string input0 = 1;
+}
+message ExampleOneArgOneReturnResponse {
+ string message = 1;
+}
+message TestRPCNoArgsResponse {
+ string message = 1;
+}
+message TestRPCNoReturnRequest {
+ float input0 = 1;
+}
+message TestRPCTwoArgTwoReturnRequest {
+ string input0 = 1;
+ int32 input1 = 2;
+}
+message TestRPCTwoArgTwoReturnResponse {
+ string message0 = 1;
+ int32 message1 = 2;
+}
\ No newline at end of file
diff --git a/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExampleServer.py b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExampleServer.py
new file mode 100644
index 00000000..fd54ff03
--- /dev/null
+++ b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExampleServer.py
@@ -0,0 +1,25 @@
+from google.protobuf import empty_pb2
+from google.protobuf.json_format import MessageToDict
+import VariousArgsAndReturnsRPCExample_pb2_grpc, VariousArgsAndReturnsRPCExample_pb2
+
+class RPCExampleServer(VariousArgsAndReturnsRPCExample_pb2_grpc.RPCExampleServicer):
+ script_instance = None
+ def ExampleOneArgOneReturn(self, request, context):
+ result = self.script_instance.ExampleOneArgOneReturn(**MessageToDict(request))
+ return VariousArgsAndReturnsRPCExample_pb2.ExampleOneArgOneReturnResponse(message=result)
+
+ def TestRPCNoArgs(self, request, context):
+ result = self.script_instance.TestRPCNoArgs()
+ return VariousArgsAndReturnsRPCExample_pb2.TestRPCNoArgsResponse(message=result)
+
+ def TestRPCNoInputNoReturn(self, request, context):
+ result = self.script_instance.TestRPCNoInputNoReturn()
+ return empty_pb2.Empty()
+
+ def TestRPCNoReturn(self, request, context):
+ result = self.script_instance.TestRPCNoReturn(**MessageToDict(request))
+ return empty_pb2.Empty()
+
+ def TestRPCTwoArgTwoReturn(self, request, context):
+ result = self.script_instance.TestRPCTwoArgTwoReturn(**MessageToDict(request))
+ return VariousArgsAndReturnsRPCExample_pb2.TestRPCTwoArgTwoReturnResponse(message0=result[0], message1=result[1])
diff --git a/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2.py b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2.py
new file mode 100644
index 00000000..8df60c1d
--- /dev/null
+++ b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: VariousArgsAndReturnsRPCExample.proto
+# Protobuf Python Version: 5.26.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%VariousArgsAndReturnsRPCExample.proto\x1a\x1bgoogle/protobuf/empty.proto\"/\n\x1d\x45xampleOneArgOneReturnRequest\x12\x0e\n\x06input0\x18\x01 \x01(\t\"1\n\x1e\x45xampleOneArgOneReturnResponse\x12\x0f\n\x07message\x18\x01 \x01(\t\"(\n\x15TestRPCNoArgsResponse\x12\x0f\n\x07message\x18\x01 \x01(\t\"(\n\x16TestRPCNoReturnRequest\x12\x0e\n\x06input0\x18\x01 \x01(\x02\"?\n\x1dTestRPCTwoArgTwoReturnRequest\x12\x0e\n\x06input0\x18\x01 \x01(\t\x12\x0e\n\x06input1\x18\x02 \x01(\x05\"D\n\x1eTestRPCTwoArgTwoReturnResponse\x12\x10\n\x08message0\x18\x01 \x01(\t\x12\x10\n\x08message1\x18\x02 \x01(\x05\x32\x91\x03\n\nRPCExample\x12Y\n\x16\x45xampleOneArgOneReturn\x12\x1e.ExampleOneArgOneReturnRequest\x1a\x1f.ExampleOneArgOneReturnResponse\x12?\n\rTestRPCNoArgs\x12\x16.google.protobuf.Empty\x1a\x16.TestRPCNoArgsResponse\x12H\n\x16TestRPCNoInputNoReturn\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12\x42\n\x0fTestRPCNoReturn\x12\x17.TestRPCNoReturnRequest\x1a\x16.google.protobuf.Empty\x12Y\n\x16TestRPCTwoArgTwoReturn\x12\x1e.TestRPCTwoArgTwoReturnRequest\x1a\x1f.TestRPCTwoArgTwoReturnResponseb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'VariousArgsAndReturnsRPCExample_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ DESCRIPTOR._loaded_options = None
+ _globals['_EXAMPLEONEARGONERETURNREQUEST']._serialized_start=70
+ _globals['_EXAMPLEONEARGONERETURNREQUEST']._serialized_end=117
+ _globals['_EXAMPLEONEARGONERETURNRESPONSE']._serialized_start=119
+ _globals['_EXAMPLEONEARGONERETURNRESPONSE']._serialized_end=168
+ _globals['_TESTRPCNOARGSRESPONSE']._serialized_start=170
+ _globals['_TESTRPCNOARGSRESPONSE']._serialized_end=210
+ _globals['_TESTRPCNORETURNREQUEST']._serialized_start=212
+ _globals['_TESTRPCNORETURNREQUEST']._serialized_end=252
+ _globals['_TESTRPCTWOARGTWORETURNREQUEST']._serialized_start=254
+ _globals['_TESTRPCTWOARGTWORETURNREQUEST']._serialized_end=317
+ _globals['_TESTRPCTWOARGTWORETURNRESPONSE']._serialized_start=319
+ _globals['_TESTRPCTWOARGTWORETURNRESPONSE']._serialized_end=387
+ _globals['_RPCEXAMPLE']._serialized_start=390
+ _globals['_RPCEXAMPLE']._serialized_end=791
+# @@protoc_insertion_point(module_scope)
diff --git a/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2_grpc.py b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2_grpc.py
new file mode 100644
index 00000000..ffd40507
--- /dev/null
+++ b/physiolabxr/examples/rpc/VariousArgsAndReturns/VariousArgsAndReturnsRPCExample_pb2_grpc.py
@@ -0,0 +1,275 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+import VariousArgsAndReturnsRPCExample_pb2 as VariousArgsAndReturnsRPCExample__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+GRPC_GENERATED_VERSION = '1.65.1'
+GRPC_VERSION = grpc.__version__
+EXPECTED_ERROR_RELEASE = '1.66.0'
+SCHEDULED_RELEASE_DATE = 'August 6, 2024'
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ warnings.warn(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in VariousArgsAndReturnsRPCExample_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},'
+ + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.',
+ RuntimeWarning
+ )
+
+
+class RPCExampleStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.ExampleOneArgOneReturn = channel.unary_unary(
+ '/RPCExample/ExampleOneArgOneReturn',
+ request_serializer=VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnRequest.SerializeToString,
+ response_deserializer=VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnResponse.FromString,
+ _registered_method=True)
+ self.TestRPCNoArgs = channel.unary_unary(
+ '/RPCExample/TestRPCNoArgs',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCNoArgsResponse.FromString,
+ _registered_method=True)
+ self.TestRPCNoInputNoReturn = channel.unary_unary(
+ '/RPCExample/TestRPCNoInputNoReturn',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ _registered_method=True)
+ self.TestRPCNoReturn = channel.unary_unary(
+ '/RPCExample/TestRPCNoReturn',
+ request_serializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCNoReturnRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ _registered_method=True)
+ self.TestRPCTwoArgTwoReturn = channel.unary_unary(
+ '/RPCExample/TestRPCTwoArgTwoReturn',
+ request_serializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnRequest.SerializeToString,
+ response_deserializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnResponse.FromString,
+ _registered_method=True)
+
+
+class RPCExampleServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def ExampleOneArgOneReturn(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def TestRPCNoArgs(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def TestRPCNoInputNoReturn(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def TestRPCNoReturn(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def TestRPCTwoArgTwoReturn(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_RPCExampleServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'ExampleOneArgOneReturn': grpc.unary_unary_rpc_method_handler(
+ servicer.ExampleOneArgOneReturn,
+ request_deserializer=VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnRequest.FromString,
+ response_serializer=VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnResponse.SerializeToString,
+ ),
+ 'TestRPCNoArgs': grpc.unary_unary_rpc_method_handler(
+ servicer.TestRPCNoArgs,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCNoArgsResponse.SerializeToString,
+ ),
+ 'TestRPCNoInputNoReturn': grpc.unary_unary_rpc_method_handler(
+ servicer.TestRPCNoInputNoReturn,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'TestRPCNoReturn': grpc.unary_unary_rpc_method_handler(
+ servicer.TestRPCNoReturn,
+ request_deserializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCNoReturnRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'TestRPCTwoArgTwoReturn': grpc.unary_unary_rpc_method_handler(
+ servicer.TestRPCTwoArgTwoReturn,
+ request_deserializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnRequest.FromString,
+ response_serializer=VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'RPCExample', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('RPCExample', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+class RPCExample(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def ExampleOneArgOneReturn(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/RPCExample/ExampleOneArgOneReturn',
+ VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnRequest.SerializeToString,
+ VariousArgsAndReturnsRPCExample__pb2.ExampleOneArgOneReturnResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def TestRPCNoArgs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/RPCExample/TestRPCNoArgs',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ VariousArgsAndReturnsRPCExample__pb2.TestRPCNoArgsResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def TestRPCNoInputNoReturn(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/RPCExample/TestRPCNoInputNoReturn',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def TestRPCNoReturn(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/RPCExample/TestRPCNoReturn',
+ VariousArgsAndReturnsRPCExample__pb2.TestRPCNoReturnRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def TestRPCTwoArgTwoReturn(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/RPCExample/TestRPCTwoArgTwoReturn',
+ VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnRequest.SerializeToString,
+ VariousArgsAndReturnsRPCExample__pb2.TestRPCTwoArgTwoReturnResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
diff --git a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Interface.py b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Interface.py
index c765681f..515ec539 100644
--- a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Interface.py
+++ b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Interface.py
@@ -7,9 +7,10 @@
from physiolabxr.third_party.WearableSensing.DSI_py3 import *
from physiolabxr.interfaces.DeviceInterface.DeviceInterface import DeviceInterface
-def run_dsi24_headset_process(port, com_port):
+
+def run_dsi24_headset_process(port, com_port, impedance):
terminate_event = Event()
- headset_process = Process(target=DSI24_process, args=(terminate_event, port, com_port))
+ headset_process = Process(target=DSI24_process, args=(terminate_event, port, com_port, impedance))
headset_process.start()
return headset_process, terminate_event
@@ -28,9 +29,11 @@ def __init__(self,
self.port = self.socket.getsockopt(zmq.LAST_ENDPOINT).decode("utf-8").split(":")[-1]
self.data_process = None
self.terminate_event = None
+ self.battery_level = None
+ self.impedanceValues = []
- def start_stream(self, bluetooth_port):
- self.data_process, self.terminate_event = run_dsi24_headset_process(self.port, bluetooth_port)
+ def start_stream(self, bluetooth_port, impedance):
+ self.data_process, self.terminate_event = run_dsi24_headset_process(self.port, bluetooth_port, impedance)
def process_frames(self):
frames, timestamps, messages = [], [], []
@@ -42,8 +45,16 @@ def process_frames(self):
elif data['t'] == 'e':
raise DSIException(data['message']) # this will cause stop_stream to be called
elif data['t'] == 'd':
- frames.append(data['frame'])
- timestamps.append(data['timestamp'])
+ self.battery_level = data['battery']
+ if data['impedance'] == 0:
+ frames.append(data['frame'])
+ timestamps.append(data['timestamp'])
+ else:
+ self.impedanceValues = data['frame']
+ frames.append(data['frame'])
+ timestamps.append(data['timestamp'])
+
+
except zmq.error.Again:
break
diff --git a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Options.py b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Options.py
index a83d561d..018b0847 100644
--- a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Options.py
+++ b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Options.py
@@ -1,14 +1,74 @@
from physiolabxr.ui.BaseDeviceOptions import BaseDeviceOptions
-
-
+from PyQt6.QtCore import QTimer
+from PyQt6.QtWidgets import QProgressBar
+from physiolabxr.configs.configs import AppConfigs
class DSI24_Options(BaseDeviceOptions):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
+ self.Impedance = 0
+ self.check_impedance_chkbx.clicked.connect(self.check_impedance_chkbx_clicked)
+ self.device_interface.battery_level
+ self.batteryBar1.setValue(0) # Set the value of the first progress bar
+ self.batteryBar2.setValue(0)
+ self.timer = QTimer(self)
+ self.timer.timeout.connect(self.batteryUpdate)
+ self.timer.start(5000)
+ self.timer2 = QTimer(self)
+ self.timer2.timeout.connect(self.updateImpedance)
+ self.timer2.start(1000)
+ self.impedanceDictionary = {
+ 0: self.FP1,
+ 1: self.FP2,
+ 2: self.Fz,
+ 3: self.F3,
+ 4: self.F4,
+ 5: self.F7,
+ 6: self.F8,
+ 7: self.Cz,
+ 8: self.C3,
+ 9: self.C4,
+ 10: self.T3,
+ 11: self.T4,
+ 12: self.T5,
+ 13: self.T6,
+ 14: self.P3,
+ 15: self.P4,
+ 16: self.O1,
+ 17: self.O2,
+ 18: self.A1,
+ 19: self.A2,
+ }
+ def check_impedance_chkbx_clicked(self):
+        # Toggle impedance-check mode on or off based on the checkbox state
+ if self.check_impedance_chkbx.isChecked():
+ print("Check Impedance is checked.")
+ self.Impedance = 1
- self.check_impedance_btn.clicked.connect(self.check_impedance_btn_clicked)
-
- def check_impedance_btn_clicked(self):
- raise NotImplementedError
+            # Impedance mode on: start_stream_args() will pass impedance=1 to the device process
+ else:
+ print("Check Impedance is unchecked.")
+ self.Impedance = 0
+
+ def updateImpedance(self):
+ if self.Impedance == 1:
+ if self.device_interface.impedanceValues:
+ impedanceValues = self.device_interface.impedanceValues
+ for i in range(20):
+ if float(impedanceValues[i][0]) < 1:
+ color = 'green'
+ elif float(impedanceValues[i][0]) < 10 and float(impedanceValues[i][0]) > 1:
+ color = 'yellow'
+ else:
+ color = 'red'
+ self.impedanceDictionary[i].setStyleSheet(f"""background-color: {color}; color: black""")
+
+ def batteryUpdate(self):
+ if self.device_interface.battery_level != None:
+ self.batteryBar1.setValue(self.device_interface.battery_level[0])
+ self.batteryBar2.setValue(self.device_interface.battery_level[1])
def start_stream_args(self):
- return {'bluetooth_port': self.device_port_lineedit.text()}
+ return {
+ 'bluetooth_port': self.device_port_lineedit.text(),
+ 'impedance': self.Impedance
+ }
diff --git a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Process.py b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Process.py
index 30b5b7c9..ec452600 100644
--- a/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Process.py
+++ b/physiolabxr/interfaces/DeviceInterface/DSI24/DSI24_Process.py
@@ -6,7 +6,6 @@
from physiolabxr.third_party.WearableSensing.DSI_py3 import *
import numpy as np
from pylsl import local_clock
-
is_first_time = True
time_offset = 0
dsi24_data_socket = None
@@ -39,7 +38,7 @@ def example_sample_callback_signals(headsetPtr, packetTime, userData):
h = Headset(headsetPtr)
new_data = np.array(['%+08.2f' % (ch.GetSignal()) for ch in h.Channels()])
new_data = new_data.reshape(24, 1)
- new_data = new_data[[9, 10, 3, 2, 4, 17, 18, 7, 1, 5, 11, 22, 12, 21, 8, 0, 6, 13, 14, 20, 23, 19, 15, 16], :]
+ new_data = new_data[[9, 10, 3, 2, 4, 17, 18, 7, 1, 5, 11, 22, 12, 21, 8, 0, 6, 13, 14, 20, 23, 19, 16, 15], :]
# Calculate the time offset on the first packet
if is_first_time:
@@ -51,13 +50,15 @@ def example_sample_callback_signals(headsetPtr, packetTime, userData):
print('Data and timestamp mismatch')
print(new_data.shape)
print(len(t))
-
+ batteryLevel = list(map(int, IfStringThenNormalString(h.GetBatteryLevelString()).strip("[]").split(",")))
# Create a dictionary with the stream name, data, and timestamps
# need to convert the new_data to list to make it json serializable
new_data_dict = {
't': 'd', # 'd' for data, 'i' for info, 'e' for error
'frame': new_data.tolist(),
- 'timestamp': t
+ 'timestamp': t,
+ 'battery': batteryLevel,
+ 'impedance': 0
}
# Send data via ZMQ socket to the main process
@@ -68,19 +69,67 @@ def example_sample_callback_signals(headsetPtr, packetTime, userData):
@SampleCallback
def example_sample_callback_impedances(headsetPtr, packetTime, userData):
- raise NotImplementedError
+ global is_first_time
+ global time_offset
+ global dsi24_data_socket
+
+ # Create the headset object
+ h = Headset(headsetPtr)
+
+ # Collect impedance values from all referential EEG channels (excluding factory reference)
+ impedance_data = np.array(
+ ['%+08.2f' % (src.GetImpedanceEEG()) for src in h.Sources() if src.IsReferentialEEG() and not src.IsFactoryReference()])
+ impedance_data = impedance_data.reshape(len(impedance_data), 1)
+ impedance_data = impedance_data[[10,11,5,4,6,16,17,9,3,7,12,19,13,18,0,8,14,15,1,2], :]
+ empty_rows = np.empty((3,impedance_data.shape[1]), dtype=object)
+ impedance_data = np.concatenate((impedance_data, empty_rows))
+
+ # Add the common-mode function (CMF) impedance value at the end
+ cmf_impedance = np.array([h.GetImpedanceCMF()])
+ impedance_data = np.vstack([impedance_data, cmf_impedance])
+
+ # Calculate the time offset on the first packet
+ if is_first_time:
+ time_offset = local_clock() - float(packetTime)
+ is_first_time = False
+
+ # Create timestamp
+ t = [float(packetTime) + time_offset]
+
+ # Ensure that data and timestamps are aligned
+ if impedance_data.shape[1] != len(t):
+ print('Data and timestamp mismatch')
+ print(impedance_data.shape)
+ print(len(t))
+ batteryLevel = list(map(int, IfStringThenNormalString(h.GetBatteryLevelString()).strip("[]").split(",")))
+ # Convert impedance data to a dictionary for streaming
+ impedance_data_dict = {
+ 't': 'd', # 'd' for data, 'i' for info, 'e' for error
+ 'frame': impedance_data.tolist(), # Convert impedance data to a list for JSON serialization
+ 'timestamp': t,
+ 'battery': batteryLevel,
+ 'impedance': 1
+ }
-def DSI24_process(terminate_event, network_port, com_port, args=''):
+ # Send the impedance data via ZMQ socket to the main process
+ try:
+ dsi24_data_socket.send_json(impedance_data_dict)
+ except zmq.error.ZMQError:
+ print("Socket already closed.")
+
+
+def DSI24_process(terminate_event, network_port, com_port, impedance,args=''):
"""Process to connect to the DSI-24 device and send data to the main process
Args:
network_port (int): The port number to send data to the main process
com_port (str): The COM port to connect to the DSI-24 device
- mode (str): The mode of the headset (default: None), NOT IMPLEMENTED
+        impedance (int): 1 to stream electrode impedance values, 0 to stream EEG signals
"""
global dsi24_data_socket
global is_first_time
global time_offset
+ global batteryLevel
context = zmq.Context()
dsi24_data_socket = context.socket(zmq.PUSH)
@@ -97,14 +146,15 @@ def DSI24_process(terminate_event, network_port, com_port, args=''):
headset.Disconnect()
return
- if args.lower().startswith('imp'):
- # Currently not used
+ if impedance == 1:
+
headset.SetSampleCallback(example_sample_callback_impedances, 0)
headset.StartImpedanceDriver()
else:
# Set the sample callback to ExampleSampleCallback_Signals
headset.SetSampleCallback(example_sample_callback_signals, 0)
if len(args.strip()): headset.SetDefaultReference(args, True)
+ print("EEG mode")
# Start the data acquisition
print("starting background acquisition")
headset.StartBackgroundAcquisition()
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/MotorImageryDemo.py b/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/MotorImageryDemo.py
new file mode 100644
index 00000000..fc4d249b
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/MotorImageryDemo.py
@@ -0,0 +1,244 @@
+import os
+from enum import Enum
+
+import mne
+import numpy as np
+from mne import create_info
+from mne.decoding import CSP
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.pipeline import Pipeline
+
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.scripting.physio.epochs import get_event_locked_data
+from physiolabxr.utils.buffers import flatten
+
+event_marker_stream_name = 'EventMarker_BallGame'
+
+
+class GameStates(Enum):
+ idle = 'idle'
+ train = 'train'
+ fit = 'fit'
+ eval = 'eval'
+
+
+class Events(Enum):
+ train_start = 1
+ left_trial = 2
+ right_trial = 3
+ eval_start = 6
+
+
+class CSPDecoder:
+
+ def __init__(self, n_components=4):
+ self.n_components = n_components
+ self.csp = CSP(n_components=n_components, reg=None, log=True, norm_trace=False)
+ self.lda = LinearDiscriminantAnalysis()
+ self.clf = Pipeline([('CSP', self.csp), ('LDA', self.lda)])
+
+ def fit(self, X, y):
+ X = self.csp.fit_transform(X, y)
+ # fit classifier
+ self.lda.fit(X, y)
+
+ def csp_transfomr(self, X):
+ return self.csp.transform(X)
+
+ def transform(self, X):
+ X = self.csp.transform(X)
+ return self.lda.transform(X)
+
+
+def sigmoid(z):
+ return 1 / (1 + np.exp(-z))
+
+
+class MovingAverage:
+ def __init__(self, window_size=10):
+ self.window_size = window_size
+ self.buffer = []
+
+ def update(self, value):
+ self.buffer.append(value)
+ if len(self.buffer) > self.window_size:
+ self.buffer.pop(0)
+
+ def get(self):
+ return np.mean(self.buffer)
+
+
+class MotorImageryBalanceBall(RenaScript):
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+ self.cur_state = 'idle'
+
+ # Start will be called once when the run button is hit.
+ def init(self):
+ """
+
+ Note on script params:
+
+ """
+ # self.train_data_buffer = DataBuffer()
+ # self.eval_data_buffer = DataBuffer()
+ self.cur_state = 'idle'
+ self.transition_markers = [Events.train_start.value, -Events.train_start.value, Events.eval_start.value,
+ -Events.eval_start.value]
+ self.eeg_channels = ["F3", "Fz", "F4", "C3", "Cz", "C4", "P3", "P4", ]
+ self.decoder_tmin = 2.
+ self.decoder_tmax = 5.
+ self.srate = 300
+ self.decode_t_len = int((self.decoder_tmax - self.decoder_tmin) * self.srate)
+ self.label_mapping = {2: 0, 3: 1}
+ self.decoder = None
+ self.moving_average = MovingAverage(window_size=3)
+
+ self.use_aggregated_data = False
+ if "participant_name" in self.params and type(self.params["participant_name"]) is str and self.params[
+ "participant_name"] != "" and \
+ "participants_data_dir" in self.params and type(
+ self.params["participants_data_dir"]) is str and os.path.exists(self.params["participants_data_dir"]):
+ self.use_aggregated_data = True
+ print(
+ "Will use aggregated data. To not use aggregated data, remove participant_name or participant_data_dir from the parameters tab. Or"
+ "set participant_name to empty string or participant_data_dir to a non-existing directory.")
+ else:
+ self.use_aggregated_data = False
+ print(
+ "Will not use aggregated data. To use aggregated data, please set participant_name and participant_data_dir in the parameters tab."
+ "and participant_name is not empty string and participant_data_dir exists.")
+
+ # loop is called times per second
+
+ def loop(self):
+
+ if event_marker_stream_name not in self.inputs.keys(): # or #EVENT_MARKER_CHANNEL_NAME not in self.inputs.keys():
+ # print('Event marker stream not found')
+ return
+
+ self.process_event_markers()
+ if self.cur_state == GameStates.train:
+ pass
+ # keep collecting data
+ # print("In training")
+ elif self.cur_state == GameStates.eval:
+ self.decode()
+ # print("In evaluation")
+
+ # cleanup is called when the stop button is hit
+ def cleanup(self):
+ print('Cleanup function is called')
+
+ def process_event_markers(self):
+ if event_marker_stream_name in self.inputs.keys() and len(
+ np.intersect1d(self.inputs[event_marker_stream_name][0], self.transition_markers)) > 0:
+ last_processed_marker_index = None
+ for i, event_marker in enumerate(self.inputs[event_marker_stream_name][0].T):
+ game_event_marker = event_marker[0]
+ print(f'Event marker is {event_marker} at index {i}')
+
+ # state transition logic
+ if game_event_marker == Events.train_start.value:
+ self.cur_state = GameStates.train
+ print('Entering training block')
+ last_processed_marker_index = i
+
+ elif game_event_marker == -Events.train_start.value: # exiting train state
+ # collect the trials and train the decoding model
+ self.collect_trials_and_train()
+ self.cur_state = GameStates.idle
+ print('Exiting training block')
+ last_processed_marker_index = i
+
+                elif game_event_marker == Events.eval_start.value:
+                    self.cur_state = GameStates.eval
+                    print('Entering evaluation block')
+                    last_processed_marker_index = i
+
+                elif game_event_marker == -Events.eval_start.value:
+ self.cur_state = GameStates.idle
+ print('Exiting evaluation block')
+ last_processed_marker_index = i
+
+ # # collect event marker data
+ # if self.cur_state == GameStates.train:
+ # event_type = game_state_event_marker
+ # timestamp = self.inputs[event_marker_stream_name][1][i]
+ #
+ # # self.train_data_buffer.
+ # pass
+ #
+ # elif self.cur_state == GameStates.eval:
+ # pass
+
+ # self.inputs.clear_stream_buffer_data(event_marker_stream_name)
+ if last_processed_marker_index is not None:
+ self.inputs.clear_stream_up_to_index(event_marker_stream_name, last_processed_marker_index + 1)
+
+ def collect_trials_and_train(self):
+ event_locked_data, last_event_time = get_event_locked_data(event_marker=self.inputs[event_marker_stream_name],
+ data=self.inputs["DSI-24"],
+ events_of_interest=[Events.left_trial.value,
+ Events.right_trial.value],
+ tmin=self.decoder_tmin, tmax=self.decoder_tmax,
+ srate=self.srate, return_last_event_time=True,
+ verbose=1)
+ # TODO check the shape of the event locked data, how long is it. does it equal decode_t_len
+
+ train_end_index = np.argwhere(self.inputs[event_marker_stream_name][0][0] == - Events.train_start.value).item()
+ train_end_time = self.inputs[event_marker_stream_name][1][train_end_index]
+ self.inputs.clear_up_to(
+ train_end_time) # Clear the input buffer up to the last event time to avoid processing duplicate data
+
+ # build the classifier, ref https://mne.tools/dev/auto_examples/decoding/decoding_csp_eeg.html
+ labels = flatten([[events] * len(data) for events, data in event_locked_data.items()])
+ labels = np.array([self.label_mapping[label] for label in labels])
+        epochs_data = np.concatenate(list(event_locked_data.values()), axis=0)[:, [3,2,5,8,7,9,15,16], :]
+ info = create_info(ch_names=self.eeg_channels, sfreq=self.srate, ch_types='eeg')
+ montage = mne.channels.make_standard_montage("biosemi64")
+ info.set_montage(montage)
+
+ if self.use_aggregated_data:
+ participant_dir = os.path.join(self.params["participants_data_dir"], self.params["participant_name"])
+ if os.path.exists(participant_dir): # use aggregated data
+ loaded_epochs = np.load(os.path.join(participant_dir, "epochs_data.npy"))
+ epochs_data = np.concatenate((epochs_data, loaded_epochs))
+ labels = np.concatenate((labels, np.load(os.path.join(participant_dir, "labels.npy"))))
+ print(
+ f"Post-train: Loaded {len(loaded_epochs)} for participant {self.params['participant_name']}. Concatenated with current data. Total data size: {epochs_data.shape}")
+ else: # if this is a new participant, create the directory
+ os.makedirs(participant_dir)
+ print(
+ f"Post-train: New participant: {self.params['participant_name']} Created directory {os.path.join(self.params['participants_data_dir'], self.params['participant_name'])}")
+ # save the data
+ np.save(
+ os.path.join(self.params["participants_data_dir"], self.params["participant_name"], "epochs_data.npy"),
+ epochs_data)
+ np.save(os.path.join(self.params["participants_data_dir"], self.params["participant_name"], "labels.npy"),
+ labels)
+ print(f"Post-train: Saved {len(epochs_data)} for participant {self.params['participant_name']}")
+
+ self.decoder = CSPDecoder(n_components=4)
+ self.decoder.fit(epochs_data, labels)
+ # get the classification score
+ y_pred = self.decoder.transform(epochs_data)
+ score = self.decoder.lda.score(self.decoder.csp_transfomr(epochs_data), labels)
+ print(f"Fitting completed. Classification score: {score}. Plotting CSP...")
+ self.decoder.csp.plot_patterns(info, ch_type="eeg", units="Patterns (AU)", size=1.5)
+
+ def decode(self):
+ if "ma_window" in self.params and self.moving_average.window_size != self.params["ma_window"]:
+ self.moving_average = MovingAverage(window_size=self.params["ma_window"])
+ data = self.inputs["DSI-24"][0][None, [3,2,5,8,7,9,15,16], -self.decode_t_len:]
+ y_pred = self.decoder.transform(data)[0] # only one sample in batch
+ # normalize y_pred from -10 to 10 to 0 to 1
+ y_pred = sigmoid(y_pred)
+ # apply moving average
+ self.moving_average.update(y_pred[0])
+ self.outputs["MotorImageryInference"] = [self.moving_average.get()]
+
+
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingScript.py b/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/WearableSensingScript.py
similarity index 79%
rename from physiolabxr/scripting/WearableSensing/WearableSensingScript.py
rename to physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/WearableSensingScript.py
index 4d4bd774..d3ed80ac 100644
--- a/physiolabxr/scripting/WearableSensing/WearableSensingScript.py
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/WearableSensingScript.py
@@ -1,3 +1,4 @@
+from pylsl import local_clock
from physiolabxr.scripting.RenaScript import RenaScript
from physiolabxr.third_party.WearableSensing.DSI_py3 import *
import numpy as np
@@ -7,17 +8,20 @@
#Creating a data buffer with the DataBuffer class
data_buffer = DataBuffer()
+is_first_time = True
+time_offset = 0 # time offset for the first packet to the local_clock()
@MessageCallback
-def ExampleMessageCallback(msg, lvl=0):
+def ExampleMessageCallback( msg, lvl=0 ):
if lvl <= 3: # ignore messages at debugging levels higher than 3
- print("DSI Message (level %d): %s" % (lvl, IfStringThenNormalString(msg)))
+ print( "DSI Message (level %d): %s" % ( lvl, IfStringThenNormalString( msg ) ) )
return 1
-
-
@SampleCallback
def ExampleSampleCallback_Signals(headsetPtr, packetTime, userData):
#This is the function that will be called every time a new packet is received
global data_buffer
+ global is_first_time
+ global time_offset
+
#Grab the headset by using a pointer
h = Headset(headsetPtr)
#Get the signal from each channel and format it so that it can be created into an array
@@ -25,9 +29,19 @@ def ExampleSampleCallback_Signals(headsetPtr, packetTime, userData):
#Reshapes the array into a 24x1 array so that it can be inputted into the data_buffer
new_data = new_data.reshape(24,1)
#Rearrange new_data to fit with desired output format
- new_data = new_data[[9, 10, 3, 2, 4, 17, 18, 7, 1, 5, 11, 22, 12, 21, 8, 0, 6, 13, 14, 20, 23, 19, 15, 16], :]
+ new_data = new_data[[9, 10, 3, 2, 4, 17, 18, 7, 1, 5, 11, 22, 12, 21, 8, 0, 6, 13, 14, 20, 23, 19, 16, 15], :]
#Get the time of the packet as a temporary solution to timestamps
- t = [packetTime]
+ if is_first_time:
+ time_offset = local_clock() - float(packetTime)
+ is_first_time = False
+
+ t = [float(packetTime) + time_offset]
+ if new_data.shape[1] != len(t):
+ print('Data and timestamp mismatch')
+ print(new_data.shape)
+ print(new_data.shape)
+ print(len(t))
+
#Create a dictionary with the stream name, data, and timestamps
new_data_dict = {
'stream_name': 'DSI-24',
@@ -36,19 +50,6 @@ def ExampleSampleCallback_Signals(headsetPtr, packetTime, userData):
}
#Update the data buffer with the new data
data_buffer.update_buffer(new_data_dict)
-
-
-@SampleCallback
-def ExampleSampleCallback_Impedances(headsetPtr, packetTime, userData):
- #Not yet used
- h = Headset(headsetPtr)
- fmt = '%s = %5.3f'
- strings = [fmt % (IfStringThenNormalString(src.GetName()), src.GetImpedanceEEG()) for src in h.Sources() if
- src.IsReferentialEEG() and not src.IsFactoryReference()]
- strings.append(fmt % ('CMF @ ' + h.GetFactoryReferenceString(), h.GetImpedanceCMF()))
- print(('%8.3f: ' % packetTime) + ', '.join(strings))
- sys.stdout.flush()
-
class DSI24(RenaScript):
def __init__(self, *args, **kwargs):
"""
@@ -79,6 +80,7 @@ def init(self, arg = ''):
#Start the data acquisition
self.headset.StartBackgroundAcquisition()
+
def loop(self):
#Called every loop based on the user's chosen frequency
global data_buffer
@@ -91,7 +93,12 @@ def loop(self):
def cleanup(self):
#Called when the script is stopped
global data_buffer
+ global is_first_time
+ global time_offset
#Stop the data acquisition
- self.headset.StopDataAcquisition()
+ self.headset.StopBackgroundAcquisition()
#Disconnect the headset
+ time_offset = 0
+ is_first_time = True
+ self.headset.Disconnect()
data_buffer.clear_buffer()
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/__init__.py b/physiolabxr/scripting/WearableSensing/WearableSensingBasicScript/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP-noTraining.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP-noTraining.py
new file mode 100644
index 00000000..bd3cfb43
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP-noTraining.py
@@ -0,0 +1,112 @@
+from scipy.signal import butter, filtfilt
+from sklearn.cross_decomposition import CCA
+import numpy as np
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.utils.buffers import DataBuffer
+from physiolabxr.rpc.decorator import rpc, async_rpc
+
+class NeuralCooked(RenaScript):
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+
+ def init(self):
+ self.freq_bands = [(8, 60), (12, 60), (30, 60)] #defining frequency bands for filter bank
+ self.mSequence = [ #creating a list of m_sequences
+ [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0], #mSequence1
+ [1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1], #mSequence2
+ [0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1] #mSequence3
+ ]
+ self.frequency = 300 #default frequency of DSI-24
+ self.data = DataBuffer() #generating a data buffer for EEG data
+ self.cca_models = [] #creating a list to store all of the CCA models
+ self.decoded_choices = [] #creating a list to store all of the decoded choices
+
+ def loop(self):
+ EEG_Data = { #creating a dictionary for EEG data
+ 'stream_name': 'EEG Data', #defining the stream name
+ 'frames': self.inputs['DSI-24'][0][14:18, :], #choosing the correct channels
+ 'timestamps': self.inputs['DSI-24'][1] #defining the timestamps
+ }
+ self.data.update_buffer(EEG_Data) #updating the data buffer with EEG data
+ if self.data.get_data('EEG Data').shape[1] > 60000: #if the data is longer than 200 seconds then cut off beginning of data so that it is to 200 seconds
+            self.data.clear_stream_up_to_index(stream_name='EEG Data', cut_to_index=self.data.get_data('EEG Data').shape[1] - 60000)
+
+ band_data = self.apply_filter_banks(self.data.get_data('EEG Data')) #applying filter banks to EEG data
+ self.correlation_coefficients = self.apply_shifting_window_cca(band_data) #getting the correlation coefficients by applying shifting window CCA
+ highest_correlation, detected_choice = self.evaluate_correlation_coefficients( #evaluating the correlation coefficients to get the highest correlation and the detected choice
+ self.correlation_coefficients)
+ self.decoded_choices.append(detected_choice) #adding the detected choice to the list of detected choices
+
+ def cleanup(self):
+ self.freq_bands = [(8, 60), (12, 60), (30, 60)]
+ self.mSequence = []
+ self.frequency = 300
+ self.data = DataBuffer()
+ self.cca_models = []
+ self.decoded_choices = []
+
+ ## Basic Tools
+ def apply_filter_banks(self, data):
+ band_data = {}
+ for band in self.freq_bands:
+ band_key = f'band_{band[0]}_{band[1]}'
+ band_data[band_key] = self.bandpass_filter(data, band[0], band[1], self.frequency)
+ return band_data
+
+ def bandpass_filter(self, data, lowcut, highcut, fs, order=8):
+ nyquist = 0.5 * fs
+ low = lowcut / nyquist
+ high = highcut / nyquist
+ b, a = butter(order, [low, high], btype='band')
+ filtered_data = filtfilt(b, a, data)
+ return filtered_data
+
+ ## Signal Generation and CCA
+ def generate_reference_signal(self, m_sequence, length):
+ # Repeat the m-sequence to match the length of the EEG data
+ repetitions = length // len(m_sequence) + 1
+ reference_signal = np.tile(m_sequence, repetitions)[:length]
+ return reference_signal
+
+ def apply_shifting_window_cca(self, band_data):
+ correlation_coefficients = {1: [], 2: [], 3: []}
+ for band in self.freq_bands:
+ band_key = f'band_{band[0]}_{band[1]}'
+ for seq_index in range(3):
+ reference_signal = self.generate_reference_signal(self.mSequence[seq_index], band_data[band_key].shape[1])
+ cca = CCA(n_components=1)
+ cca.fit(band_data[band_key].T, reference_signal)
+ transformed_data = cca.transform(band_data[band_key].T)
+ corr = np.corrcoef(transformed_data[:, 0], reference_signal)[0, 1]
+ correlation_coefficients[seq_index + 1].append(corr)
+ return correlation_coefficients
+
+ def evaluate_correlation_coefficients(self, correlation_coefficients):
+ avg_correlations = {
+ 'mSequence1': np.mean(correlation_coefficients[1]),
+ 'mSequence2': np.mean(correlation_coefficients[2]),
+ 'mSequence3': np.mean(correlation_coefficients[3])
+ }
+
+ # Sort the sequences by their average correlation in descending order
+ sorted_correlations = sorted(avg_correlations.items(), key=lambda item: item[1], reverse=True)
+
+ # Get the highest and second-highest correlations
+ highest_sequence, highest_corr = sorted_correlations[0]
+ second_highest_sequence, second_highest_corr = sorted_correlations[1]
+
+ # Check if the highest correlation is at least 0.15 higher than the second highest
+ if highest_corr >= second_highest_corr + 0.15:
+ return highest_corr, highest_sequence
+ else:
+ return highest_corr, -1
+
+ @rpc
+ def decode(self) -> int:
+ choices = self.decoded_choices
+ user_choice = max(set(choices), key=choices.count)
+ self.decoded_choices = []
+ return user_choice
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST2.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST2.py
new file mode 100644
index 00000000..b6684724
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST2.py
@@ -0,0 +1,159 @@
+#Using 2 RPCs (train, decode) that gets called
+import numpy as np
+from scipy.signal import butter, filtfilt
+from sklearn.cross_decomposition import CCA
+from collections import deque
+from enum import Enum
+import numpy as np
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.utils.buffers import DataBuffer
+from imblearn.over_sampling import SMOTE
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import f1_score
+from sklearn import metrics
+from physiolabxr.rpc.decorator import rpc, async_rpc
+
+
+#This is a test file to test out different approaches to the game
+#We will be using a database to store data for training
+class NeuralCooked(RenaScript):
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+
+ def init(self):
+ self.mSequence = []
+ self.frequency = 300
+ self.freq_bands = [(8, 60), (12, 60), (30, 60)]
+ self.data = DataBuffer()
+ self.training_EEG_label = np.array([])
+ self.cca_models = None
+ self.decoded_choices = []
+
+ def loop(self):
+ #Grabbing p3, p4, o1 and o1 data and putting it into another data buffer
+ EEG_Data = {
+ 'stream_name': 'EEG Data',
+ 'frames': self.inputs['DSI-24'][0][14:18, :],
+ 'timestamps': self.inputs['DSI-24'][1]
+ }
+ self.data.update_buffer(EEG_Data)
+ #Removes the data if its longer than 20 seconds
+ if self.data.get_data('EEG Data').shape[1] > 60000:
+            self.data.clear_stream_up_to_index(stream_name='EEG Data', cut_to_index=self.data.get_data('EEG Data').shape[1] - 60000)
+ #if training is done(can be seen if cca_models is not none) then we can start playing
+ if self.cca_models != None:
+ # Get the EEG Data and split it into band channels
+ band_data = self.apply_filter_banks(self.data.get_data('EEG Data'))
+ # Apply shifting window CCA to each band channel
+ self.correlation_coefficients = self.apply_shifting_window_cca(band_data)
+ # Look at correlation coefficient averages for each band channel
+ # Look at who has the highest coefficient average
+ highest_correlation, detected_choice = self.evaluate_correlation_coefficients(self.correlation_coefficients)
+            self.decoded_choices.append(detected_choice)
+
+ #Training
+ def train_cca(self, m_sequences):
+ self.mSequence = m_sequences
+ # Split the data via band channels using filter banks
+ band_data = self.apply_filter_banks(self.data.get_data('EEG Data'))
+ # Train a shifting CCA for each band channel
+ EEG_split =[]
+ #Need to split the training data into 3 different sequences
+ #Training has to be around 200s or 3 minutes and 20 seconds can be increased if needed
+ EEG_split[0] = f'0:20000'
+ EEG_split[1] = f'20000:40000'
+ EEG_split[2] = f'40000:60000'
+ cca_model = {}
+ for seq in range(3):
+ for band in self.freq_bands:
+ band_key = f'band_{band[0]}_{band[1]}'
+ cca_model[band_key] = self.shifting_cca(band_data[band_key][EEG_split[seq]],m_sequences[seq])
+ self.cca_models[seq] = cca_model
+
+    def apply_filter_banks(self, data):
+        band_data = {}
+        for band in self.freq_bands:
+            band_key = f'band_{band[0]}_{band[1]}'
+            band_data[band_key] = self.bandpass_filter(data, band[0], band[1], self.frequency)
+        return band_data
+
+    def bandpass_filter(self, data, lowcut, highcut, fs, order=8):
+        nyquist = 0.5 * fs
+        low = lowcut / nyquist
+        high = highcut / nyquist
+        b, a = butter(order, [low, high], btype='band')
+        filtered_data = filtfilt(b, a, data)
+        return filtered_data
+
+ def shifting_cca(self, band_data, m_sequence_number):
+ # Implement CCA training for shifting window
+ window_size = 300 # Define window size (e.g., 1 second for a 300 Hz signal)
+ step_size = 150 # Define step size (e.g., 0.5 seconds for a 300 Hz signal)
+ num_windows = (band_data.shape[1] - window_size) // step_size + 1
+ cca_model = []
+ for i in range(num_windows):
+ start = i * step_size
+ end = start + window_size
+ window_data = band_data[:, start:end]
+ cca = CCA(n_components=1)
+ cca.fit(window_data.T, self.mSequence[m_sequence_number])
+ cca_model.append(cca)
+ return cca_model
+
+ #Playing
+ def apply_shifting_window_cca(self, band_data):
+ correlation_coefficients = {1: [], 2: [], 3: []}
+ for band in self.freq_bands:
+ band_key = f'band_{band[0]}_{band[1]}'
+ for seq in correlation_coefficients.keys():
+ if self.cca_models[seq] and band_key in self.cca_models[seq]:
+ correlation_coefficients[seq].append(
+ self.calculate_correlations(band_data[band_key], self.cca_models[seq][band_key]))
+ return correlation_coefficients
+
+ def evaluate_correlation_coefficients(self, correlation_coefficients):
+ avg_correlations = {
+            'mSequence1': np.mean(correlation_coefficients[1]),
+            'mSequence2': np.mean(correlation_coefficients[2]),
+            'mSequence3': np.mean(correlation_coefficients[3])
+ }
+
+ # Sort the sequences by their average correlation in descending order
+ sorted_correlations = sorted(avg_correlations.items(), key=lambda item: item[1], reverse=True)
+
+ # Get the highest and second-highest correlations
+ highest_sequence, highest_corr = sorted_correlations[0]
+ second_highest_sequence, second_highest_corr = sorted_correlations[1]
+
+ # Check if the highest correlation is at least 0.15 higher than the second highest
+ if highest_corr >= second_highest_corr + 0.15:
+ return highest_corr, highest_sequence
+ else:
+ return None, None
+ def cleanup(self):
+ return
+
+ @rpc
+ def decode(self) -> int:
+ choices = self.decoded_choices
+ user_choice = max(set(choices), key=choices.count)
+ return user_choice
+
+
+ @async_rpc
+ def training(self, input0: int, input1: int):
+ """
+ Args:
+ input0: int - 1 for choice 1, 2 for choice 2, 3 for choice 3
+ Returns: Generates correlation coefficients for EEG data x m-sequence
+ """
+ self.mSequence = input1
+ # Train the CCA
+ self.train_cca(input0)
+
+ return
+
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.proto b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.proto
new file mode 100644
index 00000000..456122e2
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+import "google/protobuf/empty.proto";
+service NeuralCooked {
+ rpc add_seq_data(add_seq_dataRequest) returns (google.protobuf.Empty);
+ rpc decode(google.protobuf.Empty) returns (decodeResponse);
+ rpc training(google.protobuf.Empty) returns (trainingResponse);
+}
+message add_seq_dataRequest {
+ int32 sequenceNum = 1;
+ float duration = 2;
+}
+message decodeResponse {
+ int32 message = 1;
+}
+message trainingResponse {
+ int32 message = 1;
+}
\ No newline at end of file
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.py
new file mode 100644
index 00000000..006d1ddc
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3.py
@@ -0,0 +1,266 @@
+from scipy.signal import butter, filtfilt
+from sklearn.cross_decomposition import CCA
+import numpy as np
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.utils.buffers import DataBuffer
+from physiolabxr.rpc.decorator import rpc, async_rpc
+
+class NeuralCooked(RenaScript):
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+
+ def init(self):
+ self.freq_bands = [(8, 60), (12, 60), (30, 60)] # defining frequency bands for filter bank
+ self.mSequence = [] # creating a list of m_sequences
+ self.frequency = 300 # default frequency of DSI-24
+ self.data = DataBuffer() # generating a data buffer for EEG data
+ self.cca_models = [] # creating a list to store all of the CCA models
+ self.decoded_choices = [] # creating a list to store all of the decoded choices
+ self.mSequence = [
+ [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0], # mSequence1
+ [1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1], # mSequence2
+ [0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1] # mSequence3
+ ]
+ self.sequence_length = len(self.mSequence[0])
+ self.mSequenceSignal = []
+ self.seq1_data = np.array([[]])
+ self.seq2_data = np.array([[]])
+ self.seq3_data = np.array([[]])
+
+ def loop(self):
+ if self.inputs:
+ EEG_Data = { # creating a dictionary for EEG data
+ 'stream_name': 'EEG Data', # defining the stream name
+ 'frames': self.inputs['DSI24'][0][14:18, :].astype(float), # choosing the correct channels
+ 'timestamps': self.inputs['DSI24'][1].astype(float) # defining the timestamps
+ }
+ Sequence_Data = {} # creating a dictionary for sequence data
+ self.data.update_buffer(EEG_Data) # updating the data buffer with EEG data
+ if self.data.get_data('EEG Data').shape[
+ 1] > 60000: # if the data is longer than 200 seconds then cut off beginning of data so that it is to 200 seconds
+ self.data.clear_stream_up_to_index(stream_name='EEG Data',
+ cut_to_index=self.data.get_data('EEG Data').shape[1] - 60000)
+ if len(self.cca_models) == 3: # if training is complete (i.e there are 3 CCA models) then we can start decoding everything asyncronously
+ self.decode_choice() #adding the detected choice to the list of detected choices
+ def cleanup(self):
+ self.freq_bands = [(8, 60), (12, 60), (30, 60)]
+ self.mSequence = []
+ self.frequency = 300
+ self.data = DataBuffer()
+ self.cca_models = []
+ self.decoded_choices = []
+ self.mSequence = []
+ return
+
+
+ ##Basic Tools
+#===================================================================================================
+ def adjust_segments(self, segments, segment_length):
+ adjusted_segments = []
+ for segment in segments:
+ # If the segment is shorter than the desired length, pad it with zeros
+ if segment.shape[1] < segment_length:
+ padding = np.zeros((segment.shape[0], segment_length - segment.shape[1]))
+ adjusted_segment = np.hstack((segment, padding)) # Pad with zeros
+ else:
+ # If the segment is longer, trim it to the desired length
+ adjusted_segment = segment[:, :segment_length]
+
+ adjusted_segments.append(adjusted_segment)
+
+ return adjusted_segments
+
+ def generate_m_signal(self, seqNum):
+
+ # Step 1: Calculate the total number of samples needed
+ total_samples = self.sequence_length
+
+ # Step 2: Calculate the number of samples per m-sequence element
+ samples_per_bit = total_samples // len(self.mSequence[seqNum])
+
+ # Step 3: Create the binary signal by repeating each bit
+ signal = np.repeat(self.mSequence[seqNum], samples_per_bit)
+
+ # Step 4: If the signal is longer than required, truncate it
+ if len(signal) > total_samples:
+ signal = signal[:total_samples]
+ return signal
+
+ def apply_filter_banks(self, data):
+ band_data = {}
+ for band in self.freq_bands:
+ band_key = f'band_{band[0]}_{band[1]}'
+ band_data[band_key] = self.bandpass_filter(data, band[0], band[1], self.frequency)
+ return band_data
+
+
+    def bandpass_filter(self, data, lowcut, highcut, fs, order=8):
+        filtered_data = []
+        nyquist = 0.5 * fs
+        low = lowcut / nyquist
+        high = highcut / nyquist
+        b, a = butter(order, [low, high], btype='band')
+        for i in range(4):
+            filtered_data.append(filtfilt(b, a, data[i]))
+        return filtered_data
+
+
+
+ ##Begin Training
+#===================================================================================================
+ #Need to add an RPC function that adds to training dataset
+ @async_rpc
+ def training(self) -> int:
+ """
+ Args:
+ input0: int - 1 for choice 1, 2 for choice 2, 3 for choice 3
+ Returns: Generates correlation coefficients for EEG data x m-sequence
+ """
+ # Train the CCA
+ self.train_cca() #start training the CCA
+ return 1
+
+
+ @async_rpc
+ def add_seq_data(self, sequenceNum: int, duration: float): #Data is going to come in sequencially seq1 -> seq2 -> seq3 repeat
+ eegData = self.data.get_data('EEG Data')[:, int(-duration *300) :] # 4 by (samples)
+
+ if sequenceNum == 1:
+ if self.seq1_data.size == 0:
+ self.seq1_data = eegData
+ else:
+ self.seq1_data = np.concatenate((self.seq1_data, eegData),axis =1)
+ elif sequenceNum == 2:
+ if self.seq2_data.size == 0:
+ self.seq2_data = eegData
+ else:
+ self.seq2_data = np.concatenate((self.seq2_data, eegData), axis=1)
+ elif sequenceNum == 3:
+ if self.seq3_data.size == 0:
+ self.seq3_data = eegData
+ else:
+ self.seq3_data = np.concatenate((self.seq3_data, eegData), axis=1)
+
def train_cca(self):
    """Fit one CCA spatial filter per (frequency band, m-sequence) pair.

    Builds an averaged EEG template for each m-sequence, band-filters it,
    and fits a 1-component CCA between each filtered template and the
    corresponding m-sequence reference signal.  Results are stored as
    self.cca_models[band_key][seq] (matching the lookup order used when
    decoding) and self.templates[seq][band_key].
    """
    # Chop each sequence's accumulated EEG into equal-length segments and
    # trim/pad them to exactly sequence_length samples.
    seq_data = {1: self.seq1_data, 2: self.seq2_data, 3: self.seq3_data}
    segments = {
        seq: self.adjust_segments(
            np.array_split(data, data.shape[1] // self.sequence_length, axis=1),
            self.sequence_length)
        for seq, data in seq_data.items()
    }

    # Average the segments into one raw template per m-sequence.
    raw_templates = {seq: np.mean(segs, axis=0) for seq, segs in segments.items()}

    # Band-filter each template once.
    # NOTE(review): assumes apply_filter_banks returns a dict keyed by
    # 'band_lo_hi' covering all of self.freq_bands, as the indexing in
    # apply_shifting_window_cca implies — confirm.
    filtered_templates = {seq: self.apply_filter_banks(raw_templates[seq])
                          for seq in raw_templates}

    # BUG FIX: the previous loop iterated i in 0..2 against templates keyed
    # 1..3 (KeyError on templates[0]) and wrote into band_data = [[]]
    # (IndexError for i >= 1).  Iterate the real sequence keys instead.
    cca_model = {}
    for band in self.freq_bands:
        band_key = f'band_{band[0]}_{band[1]}'
        cca_model[band_key] = {}
        for seq in (1, 2, 3):
            cca = CCA(n_components=1)
            # mSequenceSignal was indexed 0..2 originally, so map seq -> seq-1.
            cca.fit(filtered_templates[seq][band_key].T,
                    self.mSequenceSignal[seq - 1].T)
            cca_model[band_key][seq] = cca

    self.cca_models = cca_model
    # Store band-filtered templates so calculate_correlations can resolve
    # self.templates[seq][band_key] (the raw per-seq arrays could not).
    self.templates = filtered_templates
+
+
+ ##Begin Playing
+#===================================================================================================
@async_rpc
def decode(self) -> int:
    """Return the majority-vote choice collected since the last call.

    Returns:
        int: the most frequent decoded choice, or -1 when nothing has been
        decoded yet (the same "no confident choice" sentinel that
        evaluate_correlation_coefficients uses).
    """
    choices = self.decoded_choices
    # BUG FIX: max() over an empty sequence raises ValueError when decode
    # is called before any choice was decoded; report the sentinel instead.
    if not choices:
        return -1
    user_choice = max(set(choices), key=choices.count)
    # Clear the collected choices for the next round.
    self.decoded_choices = []
    return user_choice
def decode_choice(self):
    """Decode one choice from the latest 5 s of EEG and record it.

    Appends the detected choice (or -1 when no sequence wins clearly) to
    self.decoded_choices for the majority vote in decode().
    """
    # BUG FIX: [:, -1500] picked a single sample column (shape (4,));
    # [:, -1500:] keeps the trailing 1500 samples (5 s at 300 Hz) as a
    # channels x samples slab.
    band_data = self.apply_filter_banks(self.data.get_data('EEG Data')[:, -1500:])
    self.correlation_coefficients = self.apply_shifting_window_cca(band_data)
    highest_correlation, detected_choice = self.evaluate_correlation_coefficients(
        self.correlation_coefficients)
    # BUG FIX: append is a method call, not a subscript — append[...]
    # raised "TypeError: 'builtin_function_or_method' is not subscriptable".
    self.decoded_choices.append(detected_choice)
def apply_shifting_window_cca(self, band_data):
    """Run shifting-window CCA over each frequency band and m-sequence.

    Args:
        band_data: dict mapping 'band_lo_hi' keys to channels x samples
            arrays produced by apply_filter_banks.

    Returns:
        dict: {sequence: [correlation per window]} for sequences 1-3.
    """
    correlation_coefficients = {1: [], 2: [], 3: []}
    window_size = 300  # 1 s window at the 300 Hz DSI-24 rate
    step_size = 150    # 0.5 s hop

    for seq in correlation_coefficients:
        for band in self.freq_bands:
            band_key = f'band_{band[0]}_{band[1]}'
            # BUG FIX: training stores models as cca_models[band_key][seq];
            # the lookup here used [seq][band_key], so the guard never
            # matched and every correlation list stayed empty.
            if band_key not in self.cca_models or seq not in self.cca_models[band_key]:
                continue
            cca_model = self.cca_models[band_key][seq]
            samples = band_data[band_key].shape[1]
            num_windows = (samples - window_size) // step_size + 1

            for i in range(num_windows):
                start = i * step_size
                window = band_data[band_key][:, start:start + window_size]
                # Project the window through the trained spatial filter,
                # then correlate against the matching band template.
                filtered = cca_model.transform(window.T)
                correlation_coefficients[seq].append(
                    self.calculate_correlations(filtered, seq, band_key))

    return correlation_coefficients
+
def calculate_correlations(self, filtered_data, seq, band_key):
    """Correlate CCA-filtered window data against the stored template.

    Args:
        filtered_data: samples x components array from CCA.transform.
        seq: m-sequence id (1-3).
        band_key: 'band_lo_hi' string selecting the frequency band.

    Returns:
        float: Pearson correlation between the first component of the
        filtered data and the first column of the matching template.
    """
    # NOTE(review): assumes self.templates[seq][band_key] is a 2-D array
    # whose first column matches filtered_data[:, 0] in length — confirm
    # against the template layout produced during training.
    template = self.templates[seq][band_key]
    return np.corrcoef(filtered_data[:, 0], template[:, 0])[0, 1]
+
def evaluate_correlation_coefficients(self, correlation_coefficients):
    """Pick the winning m-sequence from per-sequence correlation lists.

    Args:
        correlation_coefficients: {1: [...], 2: [...], 3: [...]} window
            correlations from apply_shifting_window_cca.

    Returns:
        tuple: (highest mean correlation, winning sequence id), where the
        id is -1 unless the winner beats the runner-up by at least 0.15.
    """
    means = {seq: np.mean(vals) for seq, vals in correlation_coefficients.items()}
    ranked = sorted(means.items(), key=lambda kv: kv[1], reverse=True)
    (best_seq, best_corr), (_, runner_up) = ranked[0], ranked[1]
    # Require a clear 0.15 margin over the runner-up before committing.
    if best_corr >= runner_up + 0.15:
        return best_corr, best_seq
    return best_corr, -1
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3Server.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3Server.py
new file mode 100644
index 00000000..fad58dcc
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3Server.py
@@ -0,0 +1,17 @@
+from google.protobuf import empty_pb2
+from google.protobuf.json_format import MessageToDict
+import CVEP_TEST3_pb2_grpc, CVEP_TEST3_pb2
+
class NeuralCookedServer(CVEP_TEST3_pb2_grpc.NeuralCookedServicer):
    """gRPC servicer that forwards NeuralCooked RPCs to the running script."""

    # Set by the scripting runtime before the server starts serving.
    script_instance = None

    async def add_seq_data(self, request, context):
        # Unpack the protobuf fields into keyword arguments for the script.
        self.script_instance.add_seq_data(**MessageToDict(request))
        return empty_pb2.Empty()

    async def decode(self, request, context):
        return CVEP_TEST3_pb2.decodeResponse(message=self.script_instance.decode())

    async def training(self, request, context):
        return CVEP_TEST3_pb2.trainingResponse(message=self.script_instance.training())
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2.py
new file mode 100644
index 00000000..632fe1db
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: CVEP_TEST3.proto
+# Protobuf Python Version: 5.27.2
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 27,
+ 2,
+ '',
+ 'CVEP_TEST3.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x43VEP_TEST3.proto\x1a\x1bgoogle/protobuf/empty.proto\"<\n\x13\x61\x64\x64_seq_dataRequest\x12\x13\n\x0bsequenceNum\x18\x01 \x01(\x05\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"!\n\x0e\x64\x65\x63odeResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\"#\n\x10trainingResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\x32\xb6\x01\n\x0cNeuralCooked\x12<\n\x0c\x61\x64\x64_seq_data\x12\x14.add_seq_dataRequest\x1a\x16.google.protobuf.Empty\x12\x31\n\x06\x64\x65\x63ode\x12\x16.google.protobuf.Empty\x1a\x0f.decodeResponse\x12\x35\n\x08training\x12\x16.google.protobuf.Empty\x1a\x11.trainingResponseb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'CVEP_TEST3_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ DESCRIPTOR._loaded_options = None
+ _globals['_ADD_SEQ_DATAREQUEST']._serialized_start=49
+ _globals['_ADD_SEQ_DATAREQUEST']._serialized_end=109
+ _globals['_DECODERESPONSE']._serialized_start=111
+ _globals['_DECODERESPONSE']._serialized_end=144
+ _globals['_TRAININGRESPONSE']._serialized_start=146
+ _globals['_TRAININGRESPONSE']._serialized_end=181
+ _globals['_NEURALCOOKED']._serialized_start=184
+ _globals['_NEURALCOOKED']._serialized_end=366
+# @@protoc_insertion_point(module_scope)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2_grpc.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2_grpc.py
new file mode 100644
index 00000000..8f3522a9
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEP_TEST3_pb2_grpc.py
@@ -0,0 +1,184 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+import CVEP_TEST3_pb2 as CVEP__TEST3__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+GRPC_GENERATED_VERSION = '1.67.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in CVEP_TEST3_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
+
+class NeuralCookedStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.add_seq_data = channel.unary_unary(
+ '/NeuralCooked/add_seq_data',
+ request_serializer=CVEP__TEST3__pb2.add_seq_dataRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ _registered_method=True)
+ self.decode = channel.unary_unary(
+ '/NeuralCooked/decode',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=CVEP__TEST3__pb2.decodeResponse.FromString,
+ _registered_method=True)
+ self.training = channel.unary_unary(
+ '/NeuralCooked/training',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=CVEP__TEST3__pb2.trainingResponse.FromString,
+ _registered_method=True)
+
+
+class NeuralCookedServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def add_seq_data(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def decode(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def training(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_NeuralCookedServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'add_seq_data': grpc.unary_unary_rpc_method_handler(
+ servicer.add_seq_data,
+ request_deserializer=CVEP__TEST3__pb2.add_seq_dataRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'decode': grpc.unary_unary_rpc_method_handler(
+ servicer.decode,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=CVEP__TEST3__pb2.decodeResponse.SerializeToString,
+ ),
+ 'training': grpc.unary_unary_rpc_method_handler(
+ servicer.training,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=CVEP__TEST3__pb2.trainingResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'NeuralCooked', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('NeuralCooked', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+class NeuralCooked(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def add_seq_data(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/add_seq_data',
+ CVEP__TEST3__pb2.add_seq_dataRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def decode(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/decode',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ CVEP__TEST3__pb2.decodeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def training(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/training',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ CVEP__TEST3__pb2.trainingResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.proto b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.proto
new file mode 100644
index 00000000..d0780907
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+import "google/protobuf/empty.proto";
+service CVEPdummyScript {
+ rpc addSeqData(addSeqDataRequest) returns (google.protobuf.Empty);
+ rpc decodeChoice(google.protobuf.Empty) returns (decodeChoiceResponse);
+ rpc trainingModel(google.protobuf.Empty) returns (trainingModelResponse);
+}
+message addSeqDataRequest {
+ int32 sequenceNum = 1;
+ float Duration = 2;
+}
+message decodeChoiceResponse {
+ int32 message = 1;
+}
+message trainingModelResponse {
+ int32 message = 1;
+}
\ No newline at end of file
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.py
new file mode 100644
index 00000000..f3c4a990
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript.py
@@ -0,0 +1,47 @@
+#Using 2 RPCs (train, decode) that gets called
+import numpy as np
+from scipy.signal import butter, filtfilt
+from sklearn.cross_decomposition import CCA
+from collections import deque
+from enum import Enum
+import numpy as np
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.utils.buffers import DataBuffer
+from imblearn.over_sampling import SMOTE
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import f1_score
+from sklearn import metrics
+from physiolabxr.rpc.decorator import rpc, async_rpc
+import time
+import random
+
class CVEPdummyScript(RenaScript):
    """Stand-in script that fakes the CVEP train/decode RPC surface."""

    def __init__(self, *args, **kwargs):
        """
        Please do not edit this function
        """
        super().__init__(*args, **kwargs)

    def init(self):
        return

    def loop(self):
        return

    def cleanup(self):
        return

    @async_rpc
    def trainingModel(self) -> int:
        """Pretend to train for one second, then acknowledge with 1."""
        time.sleep(1)
        return 1

    @async_rpc
    def addSeqData(self, sequenceNum: int, Duration: float):
        """Simulate ingesting one stimulus window (seq1 -> seq2 -> seq3 repeat)."""
        time.sleep(Duration)
        print(sequenceNum)

    @async_rpc
    def decodeChoice(self) -> int:
        """Return a random fake choice in 1..3."""
        return random.randint(1, 3)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScriptServer.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScriptServer.py
new file mode 100644
index 00000000..e9eed68e
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScriptServer.py
@@ -0,0 +1,17 @@
+from google.protobuf import empty_pb2
+from google.protobuf.json_format import MessageToDict
+import CVEPdummyScript_pb2_grpc, CVEPdummyScript_pb2
+
class CVEPdummyScriptServer(CVEPdummyScript_pb2_grpc.CVEPdummyScriptServicer):
    """gRPC servicer that forwards CVEPdummyScript RPCs to the running script."""

    # Set by the scripting runtime before the server starts serving.
    script_instance = None

    async def addSeqData(self, request, context):
        # Unpack the protobuf fields into keyword arguments for the script.
        self.script_instance.addSeqData(**MessageToDict(request))
        return empty_pb2.Empty()

    async def decodeChoice(self, request, context):
        return CVEPdummyScript_pb2.decodeChoiceResponse(
            message=self.script_instance.decodeChoice())

    async def trainingModel(self, request, context):
        return CVEPdummyScript_pb2.trainingModelResponse(
            message=self.script_instance.trainingModel())
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2.py
new file mode 100644
index 00000000..071e78d2
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: CVEPdummyScript.proto
+# Protobuf Python Version: 5.27.2
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 27,
+ 2,
+ '',
+ 'CVEPdummyScript.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x43VEPdummyScript.proto\x1a\x1bgoogle/protobuf/empty.proto\":\n\x11\x61\x64\x64SeqDataRequest\x12\x13\n\x0bsequenceNum\x18\x01 \x01(\x05\x12\x10\n\x08\x44uration\x18\x02 \x01(\x02\"\'\n\x14\x64\x65\x63odeChoiceResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\"(\n\x15trainingModelResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\x32\xcb\x01\n\x0f\x43VEPdummyScript\x12\x38\n\naddSeqData\x12\x12.addSeqDataRequest\x1a\x16.google.protobuf.Empty\x12=\n\x0c\x64\x65\x63odeChoice\x12\x16.google.protobuf.Empty\x1a\x15.decodeChoiceResponse\x12?\n\rtrainingModel\x12\x16.google.protobuf.Empty\x1a\x16.trainingModelResponseb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'CVEPdummyScript_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ DESCRIPTOR._loaded_options = None
+ _globals['_ADDSEQDATAREQUEST']._serialized_start=54
+ _globals['_ADDSEQDATAREQUEST']._serialized_end=112
+ _globals['_DECODECHOICERESPONSE']._serialized_start=114
+ _globals['_DECODECHOICERESPONSE']._serialized_end=153
+ _globals['_TRAININGMODELRESPONSE']._serialized_start=155
+ _globals['_TRAININGMODELRESPONSE']._serialized_end=195
+ _globals['_CVEPDUMMYSCRIPT']._serialized_start=198
+ _globals['_CVEPDUMMYSCRIPT']._serialized_end=401
+# @@protoc_insertion_point(module_scope)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2_grpc.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2_grpc.py
new file mode 100644
index 00000000..95a45cef
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/CVEPdummyScript_pb2_grpc.py
@@ -0,0 +1,184 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+import CVEPdummyScript_pb2 as CVEPdummyScript__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+GRPC_GENERATED_VERSION = '1.67.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in CVEPdummyScript_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
+
+class CVEPdummyScriptStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.addSeqData = channel.unary_unary(
+ '/CVEPdummyScript/addSeqData',
+ request_serializer=CVEPdummyScript__pb2.addSeqDataRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ _registered_method=True)
+ self.decodeChoice = channel.unary_unary(
+ '/CVEPdummyScript/decodeChoice',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=CVEPdummyScript__pb2.decodeChoiceResponse.FromString,
+ _registered_method=True)
+ self.trainingModel = channel.unary_unary(
+ '/CVEPdummyScript/trainingModel',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=CVEPdummyScript__pb2.trainingModelResponse.FromString,
+ _registered_method=True)
+
+
+class CVEPdummyScriptServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def addSeqData(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def decodeChoice(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def trainingModel(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_CVEPdummyScriptServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'addSeqData': grpc.unary_unary_rpc_method_handler(
+ servicer.addSeqData,
+ request_deserializer=CVEPdummyScript__pb2.addSeqDataRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'decodeChoice': grpc.unary_unary_rpc_method_handler(
+ servicer.decodeChoice,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=CVEPdummyScript__pb2.decodeChoiceResponse.SerializeToString,
+ ),
+ 'trainingModel': grpc.unary_unary_rpc_method_handler(
+ servicer.trainingModel,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=CVEPdummyScript__pb2.trainingModelResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'CVEPdummyScript', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('CVEPdummyScript', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+class CVEPdummyScript(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def addSeqData(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/CVEPdummyScript/addSeqData',
+ CVEPdummyScript__pb2.addSeqDataRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def decodeChoice(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/CVEPdummyScript/decodeChoice',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ CVEPdummyScript__pb2.decodeChoiceResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def trainingModel(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/CVEPdummyScript/trainingModel',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ CVEPdummyScript__pb2.trainingModelResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.proto b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.proto
new file mode 100644
index 00000000..456122e2
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+import "google/protobuf/empty.proto";
+service NeuralCooked {
+ rpc add_seq_data(add_seq_dataRequest) returns (google.protobuf.Empty);
+ rpc decode(google.protobuf.Empty) returns (decodeResponse);
+ rpc training(google.protobuf.Empty) returns (trainingResponse);
+}
+message add_seq_dataRequest {
+ int32 sequenceNum = 1;
+ float duration = 2;
+}
+message decodeResponse {
+ int32 message = 1;
+}
+message trainingResponse {
+ int32 message = 1;
+}
\ No newline at end of file
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.py
new file mode 100644
index 00000000..2a15f61e
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked.py
@@ -0,0 +1,319 @@
+from scipy.signal import butter, filtfilt, iirnotch
+from sklearn.cross_decomposition import CCA
+import numpy as np
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.utils.buffers import DataBuffer
+from physiolabxr.rpc.decorator import async_rpc
+import time
+class NeuralCooked(RenaScript):
+
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+
+ def init(self):
+ self.freqBands = [(8, 60), (13, 60), (30, 60)] #defining frequency bands for filter bank
+ self.frequency = 300 #default frequency of DSI-24
+ self.refreshRate = 0.0166 #Duration of flicker
+ self.data = DataBuffer() #generating a data buffer for EEG data
+ self.templates = {}
+ self.ccaModel = {} #creating a list to store all of the CCA models
+ self.ccaResults= {}
+ self.decodedChoices = [] #creating a list to store all of the decoded choices
+ self.mSequence = [
+ [1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0], #mSequence1
+ [1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0], #mSequence2
+ [1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0] #mSequence3
+ ]
+ self.sequenceLength = len(self.mSequence[0])
+ self.segmentLength = int(np.ceil(self.sequenceLength*self.frequency * self.refreshRate))
+ self.mSequenceSignal = {
+ 'sequence1': self.generateMSignal(0),
+ 'sequence2': self.generateMSignal(1),
+ 'sequence3': self.generateMSignal(2)
+ }
+ self.seq1Data = np.array([[[ ]]])
+ self.seq2Data = np.array([[[ ]]])
+ self.seq3Data = np.array([[[ ]]])
+ self.gameState = 0
+
+
+ def loop(self):
+ """
+ Per-tick processing: band-pass (2-120 Hz) and notch (60 Hz) filter the
+ incoming DSI24 frames, push them into the rolling 'EEG Data' buffer,
+ trim the buffer to the newest 400 samples, and run online decoding once
+ more than segmentLength samples are buffered and training has finished
+ (gameState == 1).
+ """
+ if self.inputs:
+ # rows 14:18 — presumably the 4 EEG channels of interest; TODO confirm against the DSI-24 channel layout
+ unfilteredData = self.inputs['DSI24'][0][14:18, :].astype(float)
+ filteredData1 = self.bandpassFilter(unfilteredData, 2, 120)
+ filteredData2 = self.notchFilter(filteredData1, 60)
+
+
+ EEG_Data = { #creating a dictionary for EEG data
+ 'stream_name': 'EEG Data', #defining the stream name
+ 'frames': filteredData2, #choosing the correct channels
+ 'timestamps': self.inputs['DSI24'][1].astype(float) #defining the timestamps
+ }
+ self.data.update_buffer(EEG_Data) #updating the data buffer with EEG data
+ Data = self.data.get_data('EEG Data')
+ if Data.shape[1] > 400: #keep only the newest 400 samples (~1.33 s at 300 Hz); the previous "12 s / 200 s" comment did not match the code
+ self.data.clear_stream_up_to_index(stream_name='EEG Data', cut_to_index=self.data.get_data('EEG Data').shape[1]-400)
+ if self.data.get_data('EEG Data').shape[1] > self.segmentLength:
+ if self.gameState == 1:
+ self.decodeChoice()
+
+
+ def cleanup(self):
+ self.freqBands = [(8, 60), (12, 60), (30, 60)]
+ self.mSequence = []
+ self.frequency = 300
+ self.data = DataBuffer()
+ self.ccaModels = []
+ self.decodedChoices = []
+ self.gameState = 0
+
+ return
+
+ #Data Manipulation
+ def bandpassFilter(self, data, lowcutoff, highcutoff):
+ """
+ Function that takes in data and applies a bandpass filter to it
+ :param data: EEG data to be band passed
+ :param lowcutoff: Low pass
+ :param highcutoff: High pass
+ :return: A band passed version of the data
+ """
+ nq = 0.5 * self.frequency
+ order = 3
+ lowcutOffNorm = lowcutoff / nq
+ highcutOffNorm = highcutoff / nq
+ b, a = butter(order, [ lowcutOffNorm, highcutOffNorm], btype = 'band')
+ BPdata = filtfilt(b,a,data)
+ return BPdata
+ def notchFilter(self, data, notchfreq, qualityFactor = 30):
+ """
+ Function that takes in data and applies a notch filter to it
+ :param data: signal to be filtered
+ :param notchfreq: frequency to notch
+ :param qualityFactor: quality factor of the notch filter that determines the width of the notch
+ :return: filtered array
+ """
+ # Design the notch filter
+ b, a = iirnotch(notchfreq, qualityFactor, self.frequency)
+ # Apply the filter to the data
+ filteredData = filtfilt(b, a, data)
+ return filteredData
+ def applyFilterBank(self, data):
+ """
+ Function that takes segmented data filters it with a bandpass filter, and averages the signal out over all the
+ segments to return a dictionary that contains 3 arrays for each frequency band
+ :param data: list of data to be filtered after data has been segmented
+ :return: a dictionary that contains 3 arrays for each frequency band where the keys are the found in self.freqBands
+ """
+ band = {} #Dictionary created to fill in
+ for i in range(3):
+ filteredSegments = np.empty((4,self.segmentLength,0)) # List to hold filtered segments for the current frequency band
+ # for segment in data:
+ # # Apply bandpass filter to each segment and append the result to the list
+ # filteredSegment = self.bandpassFilter(segment, self.freqBands[i][0], self.freqBands[i][1])
+ # filteredSegments = np.concatenate((filteredSegments, filteredSegment[:, :, np.newaxis]), axis =2)
+ for segmentNum in range(data.shape[2]):
+ # Apply bandpass filter to each segment and append the result to the list
+ filteredSegment = self.bandpassFilter(data[:, :, segmentNum], self.freqBands[i][0], self.freqBands[i][1])
+ filteredSegments = np.concatenate((filteredSegments, filteredSegment[:, :, np.newaxis]), axis =2)
+
+ band[self.freqBands[i]]= np.mean(filteredSegments, axis=2)
+ # Average the filtered segments across the first axis
+ # averageSegments = np.mean(filteredSegments, axis=2)
+ # Downsampling
+ # band[self.freqBands[i]] = np.mean(averageSegments.reshape(-1, 5), axis=1)
+ return band
+ def adjustSegments(self, segments):
+ """
+ Function that ensures that all segments created by the splitting of data to be the same shape as the
+ expected segment length
+ :param segments: EEG data that has been segmented
+ :return: Returns the segments but truncated to be the same size as self.segmentLength
+ """
+ adjustedSegments = []
+ for segment in segments:
+ # If the segment is shorter than the desired length, pad it out.
+ # NOTE(review): this pads with ONES (np.ones), not zeros as the
+ # original comments claimed — confirm whether zero-padding was intended.
+ if segment.shape[1] < self.segmentLength:
+ padding = np.ones((segment.shape[0], self.segmentLength - segment.shape[1]))
+ adjustedSegment = np.hstack((segment, padding)) # pad with ones (see note above)
+ else:
+ # If the segment is longer, trim it to the desired length
+ adjustedSegment = segment[:, :self.segmentLength]
+
+ adjustedSegments.append(adjustedSegment)
+
+ return adjustedSegments
+ def createTemplates(self):
+ """
+ Function that generates templates of the training EEG data by segmenting the data based on expected length of
+ samples within one iteration of the m-sequence playing in Unity and averaging the data out.
+ :param: EEG data for segmenting into templates
+ :return: dictionary of segment numbers that contain a dictionary of frequency banded EEG data that has been
+ averaged out for each segment: segment number -> keys frequency band
+ """
+ filteredData1= np.zeros(self.seq1Data.shape)
+ filteredData2= np.zeros(self.seq2Data.shape)
+ filteredData3= np.zeros(self.seq3Data.shape)
+ for segment in range(self.seq1Data.shape[2]):
+ filteredData1[:,:,segment] = self.bandpassFilter(self.notchFilter(self.seq1Data[:, :, segment], 60), 2, 120)
+ filteredData2[:,:,segment] = self.bandpassFilter(self.notchFilter(self.seq2Data[:, :, segment], 60), 2, 120)
+ filteredData3[:,:,segment] = self.bandpassFilter(self.notchFilter(self.seq3Data[:, :, segment], 60), 2, 120)
+
+ self.templates['sequence1'] = self.applyFilterBank(filteredData1) #size_segment_length
+ self.templates['sequence2'] = self.applyFilterBank(filteredData2)
+ self.templates['sequence3'] = self.applyFilterBank(filteredData3)
+ def generateMSignal(self, seqNum):
+ """
+ Function to generate a signal template for the m-sequence for CCA.fit. It functions by repeating the sequence
+ element by a certain number of times based on the sampling rate of the EEG and the refresh rate of the monitor.
+ :param seqNum: The number associated with which m-sequence the function will generate a signal for
+ :return: Returns an 8 x segmentLength array where each row is the m-sequence stretched to fit the required
+ segment length necessary for analysis via CCA
+ """
+ samplesPerBit = self.segmentLength // len(self.mSequence[seqNum])
+ signal = np.repeat(self.mSequence[seqNum], samplesPerBit )
+
+ if len(signal) > self.segmentLength:
+ signal = signal[:, :self.segmentLength]
+
+
+ elif len(signal) < self.segmentLength:
+ padding = int(np.ceil((self.segmentLength - len(signal))/2)+1)
+ signal = np.pad(signal, pad_width=padding, mode='constant', constant_values=0)
+ signal = signal[:self.segmentLength]
+ mSignal = np.expand_dims(np.tile(signal, (4, 1)),axis=2)
+ filteredMsignal = self.applyFilterBank(mSignal)
+ return filteredMsignal
+
+ def trainCCA(self):
+ """
+ Training the CCA model
+ By generated spatial filters and templates for each target m-sequence
+ """
+ self.createTemplates()
+ self.ccaModel = {k: {sub_k: None for sub_k in v.keys()} for k, v in self.templates.items() if isinstance(v, dict)}
+ for sequence in self.templates.keys():
+ for freqBand in self.templates[sequence].keys():
+ cca = CCA(n_components=1)
+ cca.fit(self.templates[sequence][freqBand].T, self.mSequenceSignal[sequence][freqBand].T)
+ self.ccaModel[sequence][freqBand] = cca
+ test1,test2 = cca.transform(self.templates[sequence][freqBand].T, self.mSequenceSignal[sequence][freqBand].T)
+ print(np.abs(np.corrcoef(test1.T,test2.T)[0, 1]))
+
+ print('training done')
+ self.gameState = 1
+
+ @async_rpc
+ def add_seq_data(self, sequenceNum: int, duration: float):
+ """
+ RPC function that gets called by Unity to grab the EEG data from *duration* samples ago and adds it to the
+ respective m-sequence data storage
+ :param sequenceNum: The number that represents the m-sequence that was played for *duration* time
+ :param duration: How long the respective m-sequence was played for
+ :return: Does not return anything to Unity it is an asynchronous process
+ """
+
+ eegData = np.expand_dims(self.data.get_data('EEG Data')[:, -self.segmentLength:], axis=2) # 4 by (samples)
+ print(self.segmentLength,duration*300)
+ if sequenceNum == 1:
+ if self.seq1Data.size == 0:
+ self.seq1Data = eegData
+ else:
+ self.seq1Data = np.concatenate((self.seq1Data, eegData), axis=2)
+ elif sequenceNum == 2:
+ if self.seq2Data.size == 0:
+ self.seq2Data = eegData
+ else:
+ self.seq2Data = np.concatenate((self.seq2Data, eegData), axis=2)
+ elif sequenceNum == 3:
+ if self.seq3Data.size == 0:
+ self.seq3Data = eegData
+ else:
+ self.seq3Data = np.concatenate((self.seq3Data, eegData), axis=2)
+
+ @async_rpc
+ def training(self) -> int:
+ """
+ RPC function that gets called by Unity to begin training off of the saved data from function: add_seq_data
+ :return: Returns 1 to Unity once it is done training
+ """
+ # Train the CCA
+ self.trainCCA() # start training the CCA
+ self.data.clear_buffer_data() #Clear the buffer after training
+ return 1
+ @async_rpc
+ def decode(self) -> int:
+ """
+ RPC function that gets called by Unity to receive what the player is looking at by sending Unity the most
+ frequently occurring m-sequence using the mode of the decoded choices
+ :return: Sends Unity the most common choice to Unity, sends 0 if there is no common choice
+ """
+
+ userChoice = 0
+ if len(self.decodedChoices) > 5:
+ choices = [x for x in self.decodedChoices if x is not None]
+ counts = {choice: choices.count(choice) for choice in set(choices)}
+ max_count = max(counts.values())
+ modes = [choice for choice, count in counts.items() if count == max_count]
+ userChoice = modes[0] if len(modes) == 1 else 0
+ self.decodedChoices = []
+ self.data.clear_buffer_data()
+ return userChoice
+
+ def decodeChoice(self):
+ """
+ A looping function that decodes the correlation coefficients of the data and adds the highest correlation
+ m-sequence (what m-sequence the user is likely looking at) to a list called decodedChoices
+ :return: Updates decodedChoice with what has been decoded
+ """
+ data = np.expand_dims(self.data.get_data('EEG Data')[:, -self.segmentLength:],axis=2)
+ self.correlationCoefficients = self.applyCCA(data) # getting the correlation coefficients by applying CCA
+ highestCorrelation, detectedChoice = self.evaluateCorrelationCoefficients(self.correlationCoefficients) # evaluating the correlation coefficients to get the highest correlation and the detected choice
+ if detectedChoice != None:
+ self.decodedChoices.append(detectedChoice)
+
+ def applyCCA(self, data):
+ """
+ Function for applying CCA
+ :param data: Data that needs to be classified
+ :return: A dictionary that contains the correlation of the EEG data with the templates
+ """
+
+ #Filter the data
+ filteredData = self.applyFilterBank(data)
+ self.ccaResults = {k: {sub_k: None for sub_k in v.keys()} for k, v in self.templates.items() if isinstance(v, dict)} #temporarily assigns the dictionary so that we can use the keys
+
+ correlation = {k: {sub_k: None for sub_k in v.keys()} for k, v in self.templates.items() if isinstance(v, dict)}
+ avgCorrelation = {}
+ #Transform the data with CCA
+ for sequence in self.templates.keys():
+ for freqBand in self.templates[sequence].keys():
+ cca = self.ccaModel[sequence][freqBand]
+ self.ccaResults[sequence][freqBand], refMSeq = cca.transform(filteredData[freqBand].T, self.mSequenceSignal[sequence][freqBand].T)
+ correlation[sequence][freqBand] = np.abs(np.corrcoef(self.ccaResults[sequence][freqBand].T, refMSeq.T)[0, 1])
+ print(sequence, freqBand, correlation[sequence][freqBand])
+ avgCorrelation[sequence] = np.mean(list(correlation[sequence].values()))
+ return avgCorrelation
+
+ def evaluateCorrelationCoefficients(self, correlationCoefficients):
+ """
+ Function for determining the highest correlation coefficient and whether the coefficient is greater than the
+ second-highest correlation by a certain threshold.
+ :param correlationCoefficients: Correlation coefficients between the EEG data and the templates
+ :return: The highest correlation as well as the corresponding m-sequence number if the correlation surpasses the
+ threshold
+ """
+ # Sort the sequences by their average correlation in descending order
+ sortedCorrelations = sorted(correlationCoefficients.items(), key=lambda item: item[1], reverse=True)
+ # Get the highest and second-highest correlations
+ highestSequence, highestCorr = sortedCorrelations[0]
+ secondHighestSequence, secondHighestCorr = sortedCorrelations[1]
+ # Accept only if the winner beats the runner-up by at least 0.01
+ # (the previous comment said 0.15, which did not match the code).
+ if highestCorr >= secondHighestCorr + 0.01:
+ # keys are 'sequenceN' -> the trailing digit is the m-sequence number
+ return highestCorr, int(highestSequence[-1])
+ else:
+ return highestCorr, None
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCookedServer.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCookedServer.py
new file mode 100644
index 00000000..f7d2a28d
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCookedServer.py
@@ -0,0 +1,17 @@
+from google.protobuf import empty_pb2
+from google.protobuf.json_format import MessageToDict
+import NeuralCooked_pb2_grpc, NeuralCooked_pb2
+
+class NeuralCookedServer(NeuralCooked_pb2_grpc.NeuralCookedServicer):
+ """gRPC servicer that forwards Unity RPCs to the running NeuralCooked script."""
+ # set externally to the live NeuralCooked instance before the server starts
+ script_instance = None
+ async def add_seq_data(self, request, context):
+ # NOTE(review): MessageToDict omits proto3 fields left at their default
+ # (0 / 0.0), so the keyword call below would fail for sequenceNum=0 or
+ # duration=0.0 — confirm callers never send defaults, or pass
+ # including_default_value_fields=True to MessageToDict.
+ result = self.script_instance.add_seq_data(**MessageToDict(request))
+ return empty_pb2.Empty()
+
+ async def decode(self, request, context):
+ result = self.script_instance.decode()
+ return NeuralCooked_pb2.decodeResponse(message=result)
+
+ async def training(self, request, context):
+ result = self.script_instance.training()
+ return NeuralCooked_pb2.trainingResponse(message=result)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2.py
new file mode 100644
index 00000000..14390554
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: NeuralCooked.proto
+# Protobuf Python Version: 5.27.2
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 27,
+ 2,
+ '',
+ 'NeuralCooked.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12NeuralCooked.proto\x1a\x1bgoogle/protobuf/empty.proto\"<\n\x13\x61\x64\x64_seq_dataRequest\x12\x13\n\x0bsequenceNum\x18\x01 \x01(\x05\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"!\n\x0e\x64\x65\x63odeResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\"#\n\x10trainingResponse\x12\x0f\n\x07message\x18\x01 \x01(\x05\x32\xb6\x01\n\x0cNeuralCooked\x12<\n\x0c\x61\x64\x64_seq_data\x12\x14.add_seq_dataRequest\x1a\x16.google.protobuf.Empty\x12\x31\n\x06\x64\x65\x63ode\x12\x16.google.protobuf.Empty\x1a\x0f.decodeResponse\x12\x35\n\x08training\x12\x16.google.protobuf.Empty\x1a\x11.trainingResponseb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'NeuralCooked_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ DESCRIPTOR._loaded_options = None
+ _globals['_ADD_SEQ_DATAREQUEST']._serialized_start=51
+ _globals['_ADD_SEQ_DATAREQUEST']._serialized_end=111
+ _globals['_DECODERESPONSE']._serialized_start=113
+ _globals['_DECODERESPONSE']._serialized_end=146
+ _globals['_TRAININGRESPONSE']._serialized_start=148
+ _globals['_TRAININGRESPONSE']._serialized_end=183
+ _globals['_NEURALCOOKED']._serialized_start=186
+ _globals['_NEURALCOOKED']._serialized_end=368
+# @@protoc_insertion_point(module_scope)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2_grpc.py b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2_grpc.py
new file mode 100644
index 00000000..129b6743
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensingNeuralCooked/NeuralCooked_pb2_grpc.py
@@ -0,0 +1,184 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+import NeuralCooked_pb2 as NeuralCooked__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+GRPC_GENERATED_VERSION = '1.67.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in NeuralCooked_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
+
+class NeuralCookedStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.add_seq_data = channel.unary_unary(
+ '/NeuralCooked/add_seq_data',
+ request_serializer=NeuralCooked__pb2.add_seq_dataRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ _registered_method=True)
+ self.decode = channel.unary_unary(
+ '/NeuralCooked/decode',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=NeuralCooked__pb2.decodeResponse.FromString,
+ _registered_method=True)
+ self.training = channel.unary_unary(
+ '/NeuralCooked/training',
+ request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ response_deserializer=NeuralCooked__pb2.trainingResponse.FromString,
+ _registered_method=True)
+
+
+class NeuralCookedServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def add_seq_data(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def decode(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def training(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_NeuralCookedServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'add_seq_data': grpc.unary_unary_rpc_method_handler(
+ servicer.add_seq_data,
+ request_deserializer=NeuralCooked__pb2.add_seq_dataRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'decode': grpc.unary_unary_rpc_method_handler(
+ servicer.decode,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=NeuralCooked__pb2.decodeResponse.SerializeToString,
+ ),
+ 'training': grpc.unary_unary_rpc_method_handler(
+ servicer.training,
+ request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ response_serializer=NeuralCooked__pb2.trainingResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'NeuralCooked', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('NeuralCooked', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+class NeuralCooked(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def add_seq_data(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/add_seq_data',
+ NeuralCooked__pb2.add_seq_dataRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def decode(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/decode',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ NeuralCooked__pb2.decodeResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def training(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/NeuralCooked/training',
+ google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ NeuralCooked__pb2.trainingResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
diff --git a/physiolabxr/scripting/WearableSensing/WearableSensing_MI_BalanceBall.py b/physiolabxr/scripting/WearableSensing/WearableSensing_MI_BalanceBall.py
new file mode 100644
index 00000000..222f1301
--- /dev/null
+++ b/physiolabxr/scripting/WearableSensing/WearableSensing_MI_BalanceBall.py
@@ -0,0 +1,223 @@
+import os
+from enum import Enum
+
+import mne
+import numpy as np
+from mne import create_info
+from mne.decoding import CSP
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.pipeline import Pipeline
+
+from physiolabxr.scripting.RenaScript import RenaScript
+from physiolabxr.scripting.physio.epochs import get_event_locked_data
+from physiolabxr.utils.buffers import flatten
+
+event_marker_stream_name = 'EventMarker_BallGame'
+class GameStates(Enum):
+ idle = 'idle'
+ train = 'train'
+ fit = 'fit'
+ eval = 'eval'
+
+class Events(Enum):
+ train_start = 1
+ left_trial = 2
+ right_trial = 3
+ eval_start = 6
+
+class CSPDecoder:
+
+ def __init__(self, n_components=4):
+ self.n_components = n_components
+ self.csp = CSP(n_components=n_components, reg=None, log=True, norm_trace=False)
+ self.lda = LinearDiscriminantAnalysis()
+ self.clf = Pipeline([('CSP', self.csp), ('LDA', self.lda)])
+
+ def fit(self, X, y):
+ X = self.csp.fit_transform(X, y)
+ # fit classifier
+ self.lda.fit(X, y)
+
+ def csp_transfomr(self, X):
+ return self.csp.transform(X)
+
+ def transform(self, X):
+ X = self.csp.transform(X)
+ return self.lda.transform(X)
+
+def sigmoid(z):
+ return 1/(1 + np.exp(-z))
+
+class MovingAverage:
+ def __init__(self, window_size=10):
+ self.window_size = window_size
+ self.buffer = []
+
+ def update(self, value):
+ self.buffer.append(value)
+ if len(self.buffer) > self.window_size:
+ self.buffer.pop(0)
+
+ def get(self):
+ return np.mean(self.buffer)
+
+class MotorImageryBalanceBall(RenaScript):
+ def __init__(self, *args, **kwargs):
+ """
+ Please do not edit this function
+ """
+ super().__init__(*args, **kwargs)
+ self.cur_state = 'idle'
+
+
+ # Start will be called once when the run button is hit.
+ def init(self):
+ """
+
+ Note on script params:
+
+ """
+ # self.train_data_buffer = DataBuffer()
+ # self.eval_data_buffer = DataBuffer()
+ self.cur_state = 'idle'
+ self.transition_markers = [Events.train_start.value, -Events.train_start.value, Events.eval_start.value, -Events.eval_start.value]
+ self.eeg_channels = ["F3", "Fz", "F4", "C3", "Cz", "C4", "P3", "P4", ]
+ self.decoder_tmin = 2.
+ self.decoder_tmax = 5.
+ self.srate = 128
+ self.decode_t_len = int((self.decoder_tmax - self.decoder_tmin) * self.srate)
+ self.label_mapping = {2: 0, 3: 1}
+ self.decoder = None
+ self.moving_average = MovingAverage(window_size=3)
+
+ self.use_aggregated_data = False
+ if "participant_name" in self.params and type(self.params["participant_name"]) is str and self.params["participant_name"] != "" and \
+ "participants_data_dir" in self.params and type(self.params["participants_data_dir"]) is str and os.path.exists(self.params["participants_data_dir"]):
+ self.use_aggregated_data = True
+ print("Will use aggregated data. To not use aggregated data, remove participant_name or participant_data_dir from the parameters tab. Or"
+ "set participant_name to empty string or participant_data_dir to a non-existing directory.")
+ else:
+ self.use_aggregated_data = False
+ print("Will not use aggregated data. To use aggregated data, please set participant_name and participant_data_dir in the parameters tab."
+ "and participant_name is not empty string and participant_data_dir exists.")
+
+
+ # loop is called times per second
+ def loop(self):
+
+ if event_marker_stream_name not in self.inputs.keys(): # or #EVENT_MARKER_CHANNEL_NAME not in self.inputs.keys():
+ # print('Event marker stream not found')
+ return
+
+ self.process_event_markers()
+ if self.cur_state == GameStates.train:
+ pass
+ # keep collecting data
+ # print("In training")
+ elif self.cur_state == GameStates.eval:
+ self.decode()
+ # print("In evaluation")
+
+ # cleanup is called when the stop button is hit
+ def cleanup(self):
+ print('Cleanup function is called')
+
+ def process_event_markers(self):
+ if event_marker_stream_name in self.inputs.keys() and len(np.intersect1d(self.inputs[event_marker_stream_name][0], self.transition_markers)) > 0:
+ last_processed_marker_index = None
+ for i, event_marker in enumerate(self.inputs[event_marker_stream_name][0].T):
+ game_event_marker = event_marker[0]
+ print(f'Event marker is {event_marker} at index {i}')
+
+ # state transition logic
+ if game_event_marker == Events.train_start.value:
+ self.cur_state = GameStates.train
+ print('Entering training block')
+ last_processed_marker_index = i
+
+ elif game_event_marker == -Events.train_start.value: # exiting train state
+ # collect the trials and train the decoding model
+ self.collect_trials_and_train()
+ self.cur_state = GameStates.idle
+ print('Exiting training block')
+ last_processed_marker_index = i
+
+ elif event_marker == Events.eval_start.value:
+ self.cur_state = GameStates.eval
+ print('Entering evaluation block')
+ last_processed_marker_index = i
+
+ elif event_marker == -Events.eval_start.value:
+ self.cur_state = GameStates.idle
+ print('Exiting evaluation block')
+ last_processed_marker_index = i
+
+ # # collect event marker data
+ # if self.cur_state == GameStates.train:
+ # event_type = game_state_event_marker
+ # timestamp = self.inputs[event_marker_stream_name][1][i]
+ #
+ # # self.train_data_buffer.
+ # pass
+ #
+ # elif self.cur_state == GameStates.eval:
+ # pass
+
+ # self.inputs.clear_stream_buffer_data(event_marker_stream_name)
+ if last_processed_marker_index is not None:
+ self.inputs.clear_stream_up_to_index(event_marker_stream_name, last_processed_marker_index+1)
+
+ def collect_trials_and_train(self):
+ """
+ Epoch the buffered EEG around left/right trial markers, optionally merge
+ with previously saved per-participant data, then fit the CSP+LDA decoder
+ and report its training score.
+ """
+ # BUG(review): self.inputs["DSI-24"] is a (frames, timestamps) pair; indexing
+ # it with the tuple [3,2,4,8,7,9,15,16] will raise a TypeError. This likely
+ # meant to select those channel rows from the frame array, e.g.
+ # self.inputs["DSI-24"][0][[3, 2, 4, 8, 7, 9, 15, 16], :] — confirm intent.
+ event_locked_data, last_event_time = get_event_locked_data(event_marker=self.inputs[event_marker_stream_name],
+ data=self.inputs["DSI-24"][3,2,4,8,7,9,15,16],
+ events_of_interest=[Events.left_trial.value, Events.right_trial.value],
+ tmin=self.decoder_tmin, tmax=self.decoder_tmax, srate=self.srate, return_last_event_time=True, verbose=1)
+ # TODO check the shape of the event locked data, how long is it. does it equal decode_t_len
+
+ # assumes exactly one -train_start marker is buffered; .item() raises otherwise
+ train_end_index = np.argwhere(self.inputs[event_marker_stream_name][0][0] == - Events.train_start.value).item()
+ train_end_time = self.inputs[event_marker_stream_name][1][train_end_index]
+ self.inputs.clear_up_to(train_end_time) # Clear the input buffer up to the last event time to avoid processing duplicate data
+
+ # build the classifier, ref https://mne.tools/dev/auto_examples/decoding/decoding_csp_eeg.html
+ labels = flatten([[events] * len(data) for events, data in event_locked_data.items()])
+ labels = np.array([self.label_mapping[label] for label in labels])
+ epochs_data = np.concatenate(list(event_locked_data.values()), axis=0)
+ info = create_info(ch_names=self.eeg_channels, sfreq=self.srate, ch_types='eeg')
+ # NOTE(review): the "biosemi64" montage uses A1..B32 channel names, which do
+ # not include F3/Fz/... from self.eeg_channels; set_montage may fail or leave
+ # positions unset — a 10-20 montage (e.g. "standard_1020") looks intended; confirm.
+ montage = mne.channels.make_standard_montage("biosemi64")
+ info.set_montage(montage)
+
+ if self.use_aggregated_data:
+ participant_dir = os.path.join(self.params["participants_data_dir"], self.params["participant_name"])
+ if os.path.exists(participant_dir): # use aggregated data
+ loaded_epochs = np.load(os.path.join(participant_dir, "epochs_data.npy"))
+ epochs_data = np.concatenate((epochs_data, loaded_epochs))
+ labels = np.concatenate((labels, np.load(os.path.join(participant_dir, "labels.npy"))))
+ print(f"Post-train: Loaded {len(loaded_epochs)} for participant {self.params['participant_name']}. Concatenated with current data. Total data size: {epochs_data.shape}")
+ else: # if this is a new participant, create the directory
+ os.makedirs(participant_dir)
+ print(f"Post-train: New participant: {self.params['participant_name']} Created directory {os.path.join(self.params['participants_data_dir'], self.params['participant_name'])}")
+ # save the data
+ np.save(os.path.join(self.params["participants_data_dir"], self.params["participant_name"], "epochs_data.npy"), epochs_data)
+ np.save(os.path.join(self.params["participants_data_dir"], self.params["participant_name"], "labels.npy"), labels)
+ print(f"Post-train: Saved {len(epochs_data)} for participant {self.params['participant_name']}")
+
+ self.decoder = CSPDecoder(n_components=4)
+ self.decoder.fit(epochs_data, labels)
+ # get the classification score
+ y_pred = self.decoder.transform(epochs_data)
+ score = self.decoder.lda.score(self.decoder.csp_transfomr(epochs_data), labels)
+ print(f"Fitting completed. Classification score: {score}. Plotting CSP...")
+ self.decoder.csp.plot_patterns(info, ch_type="eeg", units="Patterns (AU)", size=1.5)
+
+ def decode(self):
+ """
+ Run one online inference: project the latest EEG window through the
+ trained CSP+LDA decoder, squash with a sigmoid, smooth with a moving
+ average, and publish to the MotorImageryInference output stream.
+ """
+ if "ma_window" in self.params and self.moving_average.window_size != self.params["ma_window"]:
+ self.moving_average = MovingAverage(window_size=self.params["ma_window"])
+ # NOTE(review): this feeds ALL DSI-24 channels to the decoder, while
+ # collect_trials_and_train fits on an 8-channel subset — the CSP input
+ # channel count must match; confirm and select the same channels here.
+ # NOTE(review): self.decoder is None until training completes; entering
+ # eval before training will raise AttributeError.
+ data = self.inputs["DSI-24"][0][None, :, -self.decode_t_len:]
+ y_pred = self.decoder.transform(data)[0] # only one sample in batch
+ # normalize y_pred from -10 to 10 to 0 to 1
+ y_pred = sigmoid(y_pred)
+ # apply moving average
+ self.moving_average.update(y_pred[0])
+ self.outputs["MotorImageryInference"] = [self.moving_average.get()]
+
+
diff --git a/physiolabxr/thirdparty/WearableSensing/DSI.h b/physiolabxr/thirdparty/WearableSensing/DSI.h
new file mode 100644
index 00000000..245930b7
--- /dev/null
+++ b/physiolabxr/thirdparty/WearableSensing/DSI.h
@@ -0,0 +1,276 @@
+/*
+
+# This file is part of the Application Programmer's Interface (API) for Dry Sensor Interface
+# (DSI) EEG systems by Wearable Sensing. The API consists of code, headers, dynamic libraries
+# and documentation. The API allows software developers to interface directly with DSI
+# systems to control and to acquire data from them.
+#
+# The API is not certified to any specific standard. It is not intended for clinical use.
+# The API, and software that makes use of it, should not be used for diagnostic or other
+# clinical purposes. The API is intended for research use and is provided on an "AS IS"
+# basis. WEARABLE SENSING, INCLUDING ITS SUBSIDIARIES, DISCLAIMS ANY AND ALL WARRANTIES
+# EXPRESSED, STATUTORY OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT OR THIRD PARTY RIGHTS.
+#
+# (c) @YEARS@ Wearable Sensing LLC
+
+Include this file as a header for your C or C++ code. See README.txt for more details.
+
+*/
+
+
+#undef EXTERNC
+#if defined __cplusplus && !defined SUPPRESS_EXTERNC
+extern "C" {
+#define EXTERNC
+#endif /* #ifdef __cplusplus */
+
+
+#define DSI_API_MAJOR_VERSION 1 /* NB: Use DSI_GetAPIVersion(), rather than these */
+#define DSI_API_MINOR_VERSION 18 /* values, to find out the version of the API that */
+#define DSI_API_BUILD 2 /* was used to compile your dynamic library. */
+ /* Verify that the result matches the values here. */
+
+#define DSI_API_VERSION STRINGIFY_VERSION( DSI_API_MAJOR_VERSION, DSI_API_MINOR_VERSION, DSI_API_BUILD )
+#define STRINGIFY_VERSION( major, minor, build ) _STRINGIFY( major ) "." STRINGIFY( minor ) "." STRINGIFY( build )
+#define STRINGIFY( s ) _STRINGIFY( s )
+#define _STRINGIFY( s ) #s
+
+
+/* ************************************************************************************ */
+#ifndef INCLUDED_DSI_H
+#define INCLUDED_DSI_H
+
+#include <stddef.h> /* for size_t */
+typedef int bool_t;
+
+typedef char * DSI_Headset;
+typedef short * DSI_Channel;
+typedef long * DSI_Source;
+typedef double * DSI_ProcessingStage;
+
+typedef void ( *DSI_SampleCallback )( DSI_Headset h, double packetTime, void * userData );
+typedef int ( *DSI_MessageCallback )( const char * msg, int debugLevel );
+
+typedef enum {
+ ActiveSensors = 1,
+ IgnoredSensors = 2,
+ UnconnectedSensors = 4,
+ Reference = 8,
+ Triggers = 16,
+ Clocks = 32,
+ CommonModeSignal = 64,
+ NullSignal = 128,
+ TriggerBits = 256,
+ ConnectedSensors = ( ActiveSensors + IgnoredSensors ),
+ AllSensors = ( ConnectedSensors + UnconnectedSensors ),
+ Default = ( AllSensors + Triggers ),
+ Everything = 0xffff
+} DSI_SourceSelection;
+
+extern int Load_DSI_API( const char * dllname );
+
+#if defined _WIN32
+# define DYLIB_PREFIX "lib"
+# define DYLIB_EXTENSION ".dll"
+#elif defined __APPLE__
+# define DYLIB_PREFIX "lib"
+# define DYLIB_EXTENSION ".dylib"
+#else
+# define DYLIB_PREFIX "lib"
+# define DYLIB_EXTENSION ".so"
+#endif /* #if defined _WIN32 */
+#ifndef DSI_PLATFORM /* Define the DSI_PLATFORM macro when compiling your app to make it easier */
+# define DSI_PLATFORM /* for the app to find the correct build of the dynamic library by default */
+# define DSI_DYLIB_NAME( name ) ( ( name ) ? ( name ) : ( DYLIB_PREFIX "DSI" DYLIB_EXTENSION ) )
+#else /* Example: if you used the -DDSI_PLATFORM=-Darwin-x86_64 compiler flag then DSI_DYLIB_NAME( NULL ) would expand to "libDSI-Darwin-x86_64.dylib" */
+# define DSI_DYLIB_NAME( name ) ( ( name ) ? ( name ) : ( DYLIB_PREFIX "DSI" STRINGIFY( DSI_PLATFORM ) DYLIB_EXTENSION ) )
+#endif /* #ifndef DSI_PLATFORM */
+#ifndef DSI_API_FUNC
+# ifdef DSI_STATIC
+# define DSI_API_FUNC( type, name, args, implementation ) type name args; /* required if linking statically against DSI_C_Interface.cpp */
+# else
+# define DSI_API_FUNC( type, name, args, implementation ) extern type ( *name ) args; /* required if linking dynamically (default, normal use-case) */
+# endif /* #ifdef DSI_STATIC */
+#endif /* #ifndef DSI_API_FUNC */
+
+#endif /* #ifndef INCLUDED_DSI_H */
+/* ************************************************************************************ */
+
+
+/* **************************************************** */
+/* General-purpose functions */
+/* **************************************************** */
+
+DSI_API_FUNC( const char * , DSI_Error , ( void ) , { return gDSIErrorString; } )
+DSI_API_FUNC( const char * , DSI_ClearError , ( void ) , { const char * p = gDSIErrorString; gDSIErrorString = NULL; return p; } )
+DSI_API_FUNC( void , DSI_SetErrorCallback , ( DSI_MessageCallback func ) , { gDSIErrorHandler = func; } )
+
+DSI_API_FUNC( void , DSI_SanityCheck , ( void ) , DO( DSI::SanityCheck() ) )
+DSI_API_FUNC( int , DSI_Console , ( const char * msg, int debugLevel ) , RETURN( 0, DSI::Console( INPUTSTRING( msg ), debugLevel ) ) )
+DSI_API_FUNC( void , DSI_Sleep , ( double seconds ) , DO( DSI::Sleep( seconds ) ) )
+DSI_API_FUNC( const char * , DSI_PythonString , ( const void * raw, size_t nBytes, bool_t allhex ) , RETURNSTRING( DSI::PythonString( raw, nBytes, allhex ) ) )
+
+DSI_API_FUNC( const char * , DSI_GetAPIVersion , ( void ) , RETURNSTRING( DSI_API_VERSION ) )
+
+DSI_API_FUNC( const char * , DSI_GetDefaultPort , ( void ) , RETURNSTRING( DSI::DefaultPort() ) )
+
+
+/* **************************************************** */
+/* DSI_Headset methods */
+/* **************************************************** */
+
+DSI_API_FUNC( DSI_Headset , DSI_Headset_New , ( const char * port ) , RETURN( NULL, ( DSI_Headset )( ( port && *port ) ? new DSI::Headset( port ) : new DSI::Headset() ) ) )
+DSI_API_FUNC( void , DSI_Headset_Delete , ( DSI_Headset h ) , DO( delete HEADSET ) )
+
+DSI_API_FUNC( void , DSI_Headset_SetVerbosity , ( DSI_Headset h, int level ) , DO( HEADSET->SetVerbosity( level ) ) )
+DSI_API_FUNC( void , DSI_Headset_SetMessageCallback , ( DSI_Headset h, DSI_MessageCallback func ) , DO( HEADSET->SetMessageCallback( func ) ) )
+DSI_API_FUNC( void , DSI_Headset_SetSampleCallback , ( DSI_Headset h, DSI_SampleCallback func, void * userData ) , DO( HEADSET->SetSampleCallback( ( DSI::SampleCallback )func, userData ) ) )
+
+DSI_API_FUNC( void , DSI_Headset_Connect , ( DSI_Headset h, const char * port ) , DO( HEADSET->Connect( INPUTSTRING( port ) ) ) )
+DSI_API_FUNC( bool_t , DSI_Headset_IsConnected , ( DSI_Headset h ) , RETURN( 0, HEADSET->Connected() ) )
+DSI_API_FUNC( double , DSI_Headset_SecondsSinceConnection , ( DSI_Headset h ) , RETURN( -1.0, HEADSET->Elapsed() ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetPort , ( DSI_Headset h ) , RETURNSTRING( HEADSET->Port() ) )
+DSI_API_FUNC( void , DSI_Headset_Disconnect , ( DSI_Headset h ) , DO( HEADSET->Disconnect() ) )
+DSI_API_FUNC( void , DSI_Headset_QueryUnlockedFeatures , ( DSI_Headset h ) , DO( HEADSET->QueryUnlockedFeatures() ) )
+DSI_API_FUNC( int , DSI_Headset_GetFeatureAvailability , ( DSI_Headset h, const char * featureName ) , RETURN( 0, HEADSET->FeatureAvailability( INPUTSTRING( featureName ) ) ) )
+DSI_API_FUNC( void , DSI_Headset_ConfigureADC , ( DSI_Headset h, unsigned int samplesPerSecond, unsigned int filterMode ) , DO( HEADSET->ConfigureAcquisitionSettings( samplesPerSecond, filterMode ) ) )
+DSI_API_FUNC( void , DSI_Headset_SetAccelerometerRate , ( DSI_Headset h, unsigned int rate ) , DO( HEADSET->SetAccelerometerRate( rate ) ) )
+
+
+DSI_API_FUNC( void , DSI_Headset_ChooseChannels , ( DSI_Headset h, const char * spec, const char * ref, bool_t autoswap ) , DO( HEADSET->ChooseChannels( INPUTSTRING( spec ), INPUTSTRING( ref ), autoswap ) ) )
+DSI_API_FUNC( void , DSI_Headset_AddChannelToMontage_FromSource , ( DSI_Headset h, DSI_Source s ) , DO( HEADSET->AddChannelToMontage( SOURCE ) ) )
+DSI_API_FUNC( void , DSI_Headset_AddChannelToMontage_FromString , ( DSI_Headset h, const char * spec, bool_t autoswap ) , DO( HEADSET->AddChannelToMontage( INPUTSTRING( spec ), autoswap ) ) )
+DSI_API_FUNC( const char * , DSI_Headset_SetTraditionalReference , ( DSI_Headset h, bool_t autoswap ) , RETURNSTRING( HEADSET->SetTraditionalReference( autoswap ) ) )
+DSI_API_FUNC( void , DSI_Headset_SetDefaultReference , ( DSI_Headset h, const char * spec, bool_t autoswap ) , DO( HEADSET->SetDefaultReference( INPUTSTRING( spec ), autoswap ) ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetFactoryReferenceString , ( DSI_Headset h ) , RETURNSTRING( HEADSET->FactoryReferenceString() ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetReferenceString , ( DSI_Headset h ) , RETURNSTRING( HEADSET->ReferenceString() ) )
+DSI_API_FUNC( void , DSI_Headset_ForgetMontage , ( DSI_Headset h ) , DO( HEADSET->ForgetMontage() ) )
+DSI_API_FUNC( void , DSI_Headset_UseNamingScheme , ( DSI_Headset h, const char * scheme ) , DO( HEADSET->UseNamingScheme( INPUTSTRING( scheme ) ) ) )
+DSI_API_FUNC( bool_t , DSI_Headset_RenameSource , ( DSI_Headset h, const char * from, const char * to ) , RETURN( 0, HEADSET->RenameSource( INPUTSTRING( from ), INPUTSTRING( to ) ) ) )
+DSI_API_FUNC( bool_t , DSI_Headset_AddSourceAliases , ( DSI_Headset h, const char * aliases ) , RETURN( 0, HEADSET->AddSourceAliases( INPUTSTRING( aliases ) ) ) )
+
+DSI_API_FUNC( const char * , DSI_Headset_GetHardwareModel , ( DSI_Headset h ) , RETURNSTRING( HEADSET->HardwareModel() ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetHardwareRevision , ( DSI_Headset h ) , RETURNSTRING( HEADSET->HardwareRevision() ) )
+DSI_API_FUNC( int , DSI_Headset_GetNumberOfChannels , ( DSI_Headset h ) , RETURN( 0, ( int )HEADSET->Montage()->size() ) )
+DSI_API_FUNC( int , DSI_Headset_GetNumberOfSources , ( DSI_Headset h ) , RETURN( 0, ( int )HEADSET->Sources( false )->size() ) )
+DSI_API_FUNC( DSI_Channel , DSI_Headset_GetChannelByIndex , ( DSI_Headset h, unsigned int index ) , RETURN( ( DSI_Channel )NULL, ( DSI_Channel )HEADSET->GetChannelByIndex( index ) ) )
+DSI_API_FUNC( DSI_Channel , DSI_Headset_GetChannelByName , ( DSI_Headset h, const char * name ) , RETURN( ( DSI_Channel )NULL, ( DSI_Channel )HEADSET->GetChannelByName( INPUTSTRING( name ), true ) ) )
+DSI_API_FUNC( DSI_Source , DSI_Headset_GetSourceByIndex , ( DSI_Headset h, unsigned int index ) , RETURN( ( DSI_Source )NULL, ( DSI_Source )HEADSET->GetSourceByIndex( index, false ) ) )
+DSI_API_FUNC( DSI_Source , DSI_Headset_GetSourceByName , ( DSI_Headset h, const char * name ) , RETURN( ( DSI_Source )NULL, ( DSI_Source )HEADSET->MatchSource( INPUTSTRING( name ), true ) ) ) /* string should contain either a channel name or a 1-based index */
+
+DSI_API_FUNC( bool_t , DSI_Headset_IsBlueToothInitialized , ( DSI_Headset h ) , RETURN( 0, HEADSET->BlueToothInitialized() ) )
+DSI_API_FUNC( void , DSI_Headset_ReallocateBuffers , ( DSI_Headset h, double secondsForSignal, double secondsForImpedance ) , DO( HEADSET->ReallocateBuffers( secondsForSignal, secondsForImpedance ) ) )
+DSI_API_FUNC( void , DSI_Headset_FlushBuffers , ( DSI_Headset h ) , DO( HEADSET->FlushBuffers() ) )
+DSI_API_FUNC( size_t , DSI_Headset_GetNumberOfBufferedSamples , ( DSI_Headset h ) , RETURN( 0, HEADSET->Buffered() ) )
+DSI_API_FUNC( size_t , DSI_Headset_GetNumberOfOverflowedSamples , ( DSI_Headset h ) , RETURN( 0, HEADSET->BufferOverflow() ) )
+DSI_API_FUNC( void , DSI_Headset_ConfigureBufferingController , ( DSI_Headset h, double secondsBetweenUpdates, double smoothing, double P, double I, double D ) , DO( HEADSET->ConfigureBufferingController( secondsBetweenUpdates, smoothing, P, I, D ) ) )
+DSI_API_FUNC( void , DSI_Headset_ConfigureBatch , ( DSI_Headset h, unsigned int nSamples, double targetDelaySeconds ) , DO( HEADSET->ConfigureBatch( nSamples, targetDelaySeconds ) ) )
+DSI_API_FUNC( int , DSI_Headset_StartBackgroundAcquisition , ( DSI_Headset h ) , RETURN( false, HEADSET->StartBackgroundAcquisition() ) )
+DSI_API_FUNC( void , DSI_Headset_StopBackgroundAcquisition , ( DSI_Headset h ) , DO( HEADSET->StopBackgroundAcquisition() ) )
+DSI_API_FUNC( double , DSI_Headset_WaitForBatch , ( DSI_Headset h ) , RETURN( 0, HEADSET->WaitForBatch() ) )
+DSI_API_FUNC( size_t , DSI_Headset_WaitForSamples , ( DSI_Headset h, size_t target ) , RETURN( 0, HEADSET->WaitForSamples( target ) ) )
+DSI_API_FUNC( void , DSI_Headset_Idle , ( DSI_Headset h, double seconds ) , DO( HEADSET->Idle( seconds ) ) )
+DSI_API_FUNC( void , DSI_Headset_Receive , ( DSI_Headset h, double seconds, double idleAfter ) , DO( HEADSET->Receive( seconds, idleAfter ) ) )
+DSI_API_FUNC( void , DSI_Headset_KillDataStream , ( DSI_Headset h, bool_t expectReply ) , DO( HEADSET->KillDataStream( expectReply ) ) )
+DSI_API_FUNC( int , DSI_Headset_GetAlarm , ( DSI_Headset h, bool_t remove ) , RETURN( 0, HEADSET->GetAlarm( remove ) ) )
+DSI_API_FUNC( size_t , DSI_Headset_GetNumberOfAlarms , ( DSI_Headset h ) , RETURN( 0, HEADSET->GetNumberOfAlarms() ) )
+DSI_API_FUNC( void , DSI_Headset_ClearAlarms , ( DSI_Headset h ) , DO( HEADSET->ClearAlarms() ) )
+
+/* DSI_Headset getters */
+DSI_API_FUNC( void , DSI_Headset_SendBatteryQuery , ( DSI_Headset h ) , DO( HEADSET->StartBatteryMonitor() ) )
+DSI_API_FUNC( double , DSI_Headset_GetBatteryLevel , ( DSI_Headset h, int whichBattery ) , RETURN( -1.0, HEADSET->BatteryLevel( whichBattery ) ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetBatteryLevelString , ( DSI_Headset h ) , RETURNSTRING( HEADSET->BatteryLevel() ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetFirmwareRevision , ( DSI_Headset h ) , RETURNSTRING( HEADSET->FirmwareRevision() ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetMontageString , ( DSI_Headset h ) , RETURNSTRING( HEADSET->MontageString( true ) ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetSourceNames , ( DSI_Headset h, DSI_SourceSelection selection ) , RETURNSTRING( HEADSET->SourceNames( selection ) ) )
+DSI_API_FUNC( const char * , DSI_Headset_GetInfoString , ( DSI_Headset h ) , RETURNSTRING( HEADSET->InfoString() ) )
+DSI_API_FUNC( unsigned int , DSI_Headset_GetSensorCount , ( DSI_Headset h ) , RETURN( 0, HEADSET->SensorCount() ) )
+DSI_API_FUNC( unsigned int , DSI_Headset_GetSerialNumber , ( DSI_Headset h ) , RETURN( 0, HEADSET->SerialNumber() ) )
+DSI_API_FUNC( double , DSI_Headset_GetSamplingRate , ( DSI_Headset h ) , RETURN( -1.0, HEADSET->SamplingRate() ) )
+DSI_API_FUNC( double , DSI_Headset_GetAccelerometerRate , ( DSI_Headset h ) , RETURN( -1.0, HEADSET->AccelerometerRate() ) )
+DSI_API_FUNC( int , DSI_Headset_GetFilterMode , ( DSI_Headset h ) , RETURN( -1, HEADSET->FilterMode() ) )
+DSI_API_FUNC( int , DSI_Headset_GetDataAcquisitionMode , ( DSI_Headset h ) , RETURN( -1, HEADSET->DataAcquisitionMode() ) )
+DSI_API_FUNC( int , DSI_Headset_GetImpedanceDriverMode , ( DSI_Headset h ) , RETURN( -1, HEADSET->ImpedanceDriverMode() ) )
+DSI_API_FUNC( int , DSI_Headset_GetAnalogResetMode , ( DSI_Headset h ) , RETURN( -1, HEADSET->AnalogResetMode() ) )
+DSI_API_FUNC( double , DSI_Headset_GetImpedanceCMF , ( DSI_Headset h ) , RETURN( 0.0, HEADSET->ImpedanceCMF() ) )
+
+/* DSI_Headset low-level commands: */
+DSI_API_FUNC( void , DSI_Headset_StartDataAcquisition , ( DSI_Headset h ) , DO( HEADSET->StartDataAcquisition() ) )
+DSI_API_FUNC( void , DSI_Headset_StopDataAcquisition , ( DSI_Headset h ) , DO( HEADSET->StopDataAcquisition() ) )
+DSI_API_FUNC( void , DSI_Headset_StartImpedanceDriver , ( DSI_Headset h ) , DO( HEADSET->StartImpedanceDriver() ) )
+DSI_API_FUNC( void , DSI_Headset_StopImpedanceDriver , ( DSI_Headset h ) , DO( HEADSET->StopImpedanceDriver() ) )
+DSI_API_FUNC( void , DSI_Headset_StartAnalogReset , ( DSI_Headset h ) , DO( HEADSET->StartAnalogReset( false ) ) )
+DSI_API_FUNC( void , DSI_Headset_LockAnalogReset , ( DSI_Headset h ) /* NB: invalidates signal data */ , DO( HEADSET->LockAnalogReset() ) )
+DSI_API_FUNC( void , DSI_Headset_ReleaseAnalogReset , ( DSI_Headset h ) , DO( HEADSET->ReleaseAnalogReset() ) )
+DSI_API_FUNC( void , DSI_Headset_Shutdown , ( DSI_Headset h ) , DO( HEADSET->Shutdown() ) )
+DSI_API_FUNC( void , DSI_Headset_UseOptionalCommandPrefix , ( DSI_Headset h, bool_t yesOrNo ) , DO( HEADSET->SetCommandPrefix( yesOrNo ? "\x7E" : "" ) ) )
+DSI_API_FUNC( void , DSI_Headset_ChangeLEDs , ( DSI_Headset h, int setAndSelect ) , DO( HEADSET->ChangeLEDs( ( uint16_t ) setAndSelect ) ) )
+/* NB: DSI_Headset_ChangeLEDs is only available on certain firmware versions; conventions for the setAndSelect argument are supplied separately */
+
+/* **************************************************** */
+/* DSI_Headset methods supporting plug-in processing */
+/* stages, and DSI_ProcessingStage methods */
+/* **************************************************** */
+
+DSI_API_FUNC( DSI_ProcessingStage , DSI_Headset_AddProcessingStage , ( DSI_Headset h, const char * name, DSI_SampleCallback func, void * paramData, DSI_ProcessingStage input ) , RETURN( ( DSI_ProcessingStage )NULL, ( DSI_ProcessingStage )HEADSET->AddProcessingStage( INPUTSTRING( name ), ( DSI::SampleCallback )func, paramData, ( DSI::ProcessingStage * )input ) ) )
+DSI_API_FUNC( void , DSI_Headset_ReallocateStageBuffers , ( DSI_Headset h, DSI_ProcessingStage stage, double seconds ) , DO( HEADSET->ReallocateStageBuffers( ( DSI::ProcessingStage * )stage, seconds ) ) )
+DSI_API_FUNC( unsigned int , DSI_Headset_GetNumberOfProcessingStages , ( DSI_Headset h ) , RETURN( 0, HEADSET->NumberOfProcessingStages() ) )
+DSI_API_FUNC( DSI_ProcessingStage , DSI_Headset_GetProcessingStageByIndex , ( DSI_Headset h, unsigned int index ) , RETURN( ( DSI_ProcessingStage )NULL, ( DSI_ProcessingStage )HEADSET->GetProcessingStageByIndex( index ) ) )
+DSI_API_FUNC( DSI_ProcessingStage , DSI_Headset_GetProcessingStageByName , ( DSI_Headset h, const char * name ) , RETURN( ( DSI_ProcessingStage )NULL, ( DSI_ProcessingStage )HEADSET->MatchProcessingStage( INPUTSTRING( name ), true ) ) )
+
+DSI_API_FUNC( DSI_Channel , DSI_ProcessingStage_GetChannelByIndex , ( DSI_ProcessingStage p, unsigned int index ) , RETURN( ( DSI_Channel )NULL, ( DSI_Channel )STAGE->GetChannelByIndex( index ) ) )
+DSI_API_FUNC( DSI_Channel , DSI_ProcessingStage_GetChannelByName , ( DSI_ProcessingStage p, const char * name ) , RETURN( ( DSI_Channel )NULL, ( DSI_Channel )STAGE->GetChannelByName( INPUTSTRING( name ), true ) ) )
+DSI_API_FUNC( void , DSI_ProcessingStage_ClearChannels , ( DSI_ProcessingStage p ) , DO( STAGE->ClearChannels() ) )
+DSI_API_FUNC( DSI_Channel , DSI_ProcessingStage_AddChannel , ( DSI_ProcessingStage p, const char * name, size_t bufferSamples ) , RETURN( ( DSI_Channel )NULL, ( DSI_Channel )STAGE->AddChannel( INPUTSTRING( name ), bufferSamples ) ) )
+DSI_API_FUNC( DSI_ProcessingStage , DSI_ProcessingStage_GetInput , ( DSI_ProcessingStage p ) , RETURN( ( DSI_ProcessingStage )NULL, ( DSI_ProcessingStage )STAGE->Input() ) )
+DSI_API_FUNC( const char * , DSI_ProcessingStage_GetName , ( DSI_ProcessingStage p ) , RETURNSTRING( STAGE->Name() ) )
+DSI_API_FUNC( void * , DSI_ProcessingStage_ParamData , ( DSI_ProcessingStage p ) , RETURN( NULL, STAGE->ParamData() ) )
+DSI_API_FUNC( unsigned int , DSI_ProcessingStage_GetNumberOfChannels , ( DSI_ProcessingStage p ) , RETURN( 0, STAGE->NumberOfChannels() ) )
+DSI_API_FUNC( double , DSI_ProcessingStage_Read , ( DSI_ProcessingStage p, unsigned int channel, size_t lookbackSteps ) , RETURN( 0.0, STAGE->Read( channel, lookbackSteps ) ) )
+DSI_API_FUNC( void , DSI_ProcessingStage_Write , ( DSI_ProcessingStage p, unsigned int channel, double value ) , DO( STAGE->Write( channel, value ) ) )
+
+
+/* **************************************************** */
+/* DSI_Channel methods */
+/* **************************************************** */
+
+DSI_API_FUNC( double , DSI_Channel_LookBack , ( DSI_Channel c, size_t nSteps ) , RETURN( 0.0, CHANNEL->LookBack( nSteps, false ) ) )
+DSI_API_FUNC( double , DSI_Channel_ReadBuffered , ( DSI_Channel c ) , RETURN( 0.0, CHANNEL->ReadBuffered() ) )
+DSI_API_FUNC( size_t , DSI_Channel_GetNumberOfBufferedSamples , ( DSI_Channel c ) , RETURN( 0, CHANNEL->Buffered() ) )
+DSI_API_FUNC( size_t , DSI_Channel_GetNumberOfOverflowedSamples , ( DSI_Channel c ) , RETURN( 0, CHANNEL->BufferOverflow() ) )
+DSI_API_FUNC( size_t , DSI_Channel_GetBufferCapacity , ( DSI_Channel c ) , RETURN( 0, CHANNEL->BufferSize() ) )
+DSI_API_FUNC( void , DSI_Channel_FlushOutputBuffer , ( DSI_Channel c ) , DO( CHANNEL->FlushOutputBuffer() ) )
+DSI_API_FUNC( void , DSI_Channel_SetName , ( DSI_Channel c, const char * name ) , DO( CHANNEL->SetName( INPUTSTRING( name ) ) ) )
+DSI_API_FUNC( const char * , DSI_Channel_GetName , ( DSI_Channel c ) , RETURNSTRING( CHANNEL->Name() ) )
+DSI_API_FUNC( const char * , DSI_Channel_GetString , ( DSI_Channel c ) , RETURNSTRING( CHANNEL->String( true ) ) )
+DSI_API_FUNC( double , DSI_Channel_GetSignal , ( DSI_Channel c ) , RETURN( 0.0, CHANNEL->Signal() ) )
+DSI_API_FUNC( double , DSI_Channel_CalculateRawSignal , ( DSI_Channel c ) , RETURN( 0.0, CHANNEL->CalculateRawSignal() ) )
+DSI_API_FUNC( bool_t , DSI_Channel_IsReferentialEEG , ( DSI_Channel c ) , RETURN( 0, CHANNEL->IsReferentialEEG() ) )
+DSI_API_FUNC( bool_t , DSI_Channel_IsTrigger , ( DSI_Channel c ) , RETURN( 0, CHANNEL->IsTrigger() ) )
+
+
+/* **************************************************** */
+/* DSI_Source methods */
+/* **************************************************** */
+
+DSI_API_FUNC( bool_t , DSI_Source_IsTrigger , ( DSI_Source s ) , RETURN( 0, SOURCE->IsTrigger() ) )
+DSI_API_FUNC( bool_t , DSI_Source_IsSensor , ( DSI_Source s ) , RETURN( 0, SOURCE->IsSensor() ) )
+DSI_API_FUNC( bool_t , DSI_Source_IsReferentialEEG , ( DSI_Source s ) , RETURN( 0, SOURCE->IsReferentialEEG() ) )
+DSI_API_FUNC( bool_t , DSI_Source_IsFactoryReference , ( DSI_Source s ) , RETURN( 0, SOURCE->IsFactoryReference() ) )
+DSI_API_FUNC( bool_t , DSI_Source_IsCommonModeSignal , ( DSI_Source s ) , RETURN( 0, SOURCE->IsCommonModeSignal() ) )
+DSI_API_FUNC( void , DSI_Source_SetName , ( DSI_Source s, const char * name ) , DO( SOURCE->SetName( INPUTSTRING( name ) ) ) )
+DSI_API_FUNC( const char * , DSI_Source_GetName , ( DSI_Source s ) , RETURNSTRING( SOURCE->Name() ) )
+DSI_API_FUNC( double , DSI_Source_GetSignal , ( DSI_Source s ) , RETURN( 0.0, SOURCE->Signal() ) )
+DSI_API_FUNC( double , DSI_Source_GetGain , ( DSI_Source s ) , RETURN( 0.0, SOURCE->Gain() ) )
+DSI_API_FUNC( double , DSI_Source_GetImpedanceEEG , ( DSI_Source s ) , RETURN( 0.0, SOURCE->ImpedanceEEG() ) )
+DSI_API_FUNC( double , DSI_Source_GetImpedanceCMF , ( DSI_Source s ) /* deprecated: use DSI_Headset_GetImpedanceCMF */ , RETURN( 0.0, SOURCE->ImpedanceCMF() ) )
+DSI_API_FUNC( double , DSI_Source_GetDCOffset , ( DSI_Source s ) , RETURN( 0.0, SOURCE->DCOffset() ) )
+
+
+
+
+#ifdef EXTERNC
+} /* ends extern "C" block */
+#endif /* #ifdef EXTERNC */
+#undef DSI_API_FUNC
diff --git a/physiolabxr/thirdparty/WearableSensing/DSI.py b/physiolabxr/thirdparty/WearableSensing/DSI.py
new file mode 100644
index 00000000..59c34bc7
--- /dev/null
+++ b/physiolabxr/thirdparty/WearableSensing/DSI.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python
+"""
+This is a Python interface to the Dry Sensor Interface (DSI) headset by Wearable Sensing
+LLC. It uses the DSI API, loaded from the libDSI dynamic library via ctypes. The dynamic
+library must be in the same directory as this Python file. Function prototypes are parsed
+automatically at import time from DSI.h, so DSI.h must also be in the same directory.
+
+Most of the C functions are reinterpreted as object methods: this module defines
+classes Headset, Source and Channel to wrap them, and adds two helper methods:
+Headset.Sources() and Headset.Channels(). It also defines various global functions,
+and the decorators SampleCallback and MessageCallback. Examples of how to use the
+decorators, and a minimal Test() function, are provided at the bottom of this file.
+
+Normal usage would be to import this file and use the classes and functions the module
+provides. As a quick test, the Test() function can be run by executing this file directly,
+with the serial port address as the first command-line argument, and (optionally) the
+reference Source name or the word 'impedances' as the second.
+
+The Python source file also contains copyright and disclaimer information.
+"""
+
+# This file is part of the Application Programmer's Interface (API) for Dry Sensor Interface
+# (DSI) EEG systems by Wearable Sensing. The API consists of code, headers, dynamic libraries
+# and documentation. The API allows software developers to interface directly with DSI
+# systems to control and to acquire data from them.
+#
+# The API is not certified to any specific standard. It is not intended for clinical use.
+# The API, and software that makes use of it, should not be used for diagnostic or other
+# clinical purposes. The API is intended for research use and is provided on an "AS IS"
+# basis. WEARABLE SENSING, INCLUDING ITS SUBSIDIARIES, DISCLAIMS ANY AND ALL WARRANTIES
+# EXPRESSED, STATUTORY OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT OR THIRD PARTY RIGHTS.
+#
+# (c) @YEARS@ Wearable Sensing LLC
+
+
+# TODO: enum
+
+__all__ = [
+ 'Headset', 'Source', 'Channel',
+ 'SampleCallback', 'MessageCallback',
+ 'DSIException',
+ 'IfStringThenRawString', 'IfStringThenNormalString',
+]
+# global DSI_* functions from the dylib will be appended to this
+
+import os, sys, ctypes
+import numpy as np
+
+from pylsl import StreamInfo, StreamOutlet
+
+if sys.version >= '3': unicode = str; basestring = ( bytes, unicode ) # bytes is already defined, unicode is not
+else: bytes = str # unicode is already defined, bytes is not
+
+# LSL globals
+info = StreamInfo('DSI24-LSL', 'EEG', 24, 250, 'float32', 'uniqueid12345')
+outlet = StreamOutlet(info)
+
+def IfStringThenRawString( x ):
+ """
+ A string is likely to be either raw bytes already, or utf-8-encoded unicode. A simple
+ quoted string literal may or may not be raw bytes, depending on Python version. This
+ is a problem.
+
+ If x is a string then, regardless of Python version and starting format, return the
+ "raw bytes" version of it so that we can send it over a serial port, pass it via
+ ctypes to a C function, etc.
+
+ If x is not a string, return it unchanged (so you can use this function to filter a
+ whole list of arguments agnostically).
+
+ See also IfStringThenNormalString()
+ """
+ if isinstance( x, unicode ): x = x.encode( 'utf-8' )
+ return x
+def IfStringThenNormalString( x ):
+ """
+ A string is likely to be either raw bytes or utf-8-encoded unicode. Depending on
+ Python version, either the raw bytes or the unicode might be treated as a "normal"
+ string (i.e. the type you get from an ordinary quoted string literal, and the type
+ can be print()ed without adornment). This is a problem.
+
+ If x is a string then, regardless of Python version and starting format, return the
+ "normal string" version of it so that we can print it, use it for formatting, make an
+ Exception out of it, get on with our lives, etc.
+
+ If x is not a string, return it unchanged (so you can feel free to use this function
+ to filter a whole list of arguments agnostically).
+
+ See also IfStringThenRawString()
+ """
+ if str is not bytes and isinstance( x, bytes ): x = x.decode( 'utf-8' )
+ return x
+
+
+class Headset:
+ def __init__( self, arg=None ):
+ self.ptr = arg
+ self.__needs_cleanup = False
+ if arg is None or isinstance( arg, basestring ): # treat arg as port specifier string rather than a pointer
+ self.ptr = dll.DSI_Headset_New( IfStringThenRawString( arg ) )
+ self.__needs_cleanup = True
+ def __del__( self ):
+ if self.__needs_cleanup:
+ try: dll.DSI_Headset_Delete( self.ptr )
+ except: pass
+ else: self.__needs_cleanup = False
+ def Sources( self ): return [ self.GetSourceByIndex( i ) for i in range( self.GetNumberOfSources() ) ]
+ def Channels( self ): return [ self.GetChannelByIndex( i ) for i in range( self.GetNumberOfChannels() ) ]
+
+class Source:
+ def __init__( self, ptr ): self.ptr = ptr
+
+class Channel:
+ def __init__( self, ptr ): self.ptr = ptr
+
+class ProcessingStage:
+ def __init__( self, ptr ): self.ptr = ptr
+
+class DSIException( Exception ): pass
+
+
+SampleCallback = ctypes.CFUNCTYPE( None, ctypes.c_void_p, ctypes.c_double, ctypes.c_void_p )
+MessageCallback = ctypes.CFUNCTYPE( ctypes.c_int, ctypes.c_char_p, ctypes.c_int )
+
+@MessageCallback
+def NullMessageCallback( msg, lvl=0 ): return 1
+
+@SampleCallback
+def NullSampleCallback( headsetPtr, packetTime, userData ): pass
+
+__allprototypes__ = []
+def LoadAPI( dllname = None ):
+ import platform, re, inspect, ctypes.util
+
+ if dllname == None:
+ uname = platform.system()
+ machine = platform.machine().lower()
+ if machine.startswith( 'armv' ): machine = machine.rstrip( 'l' )
+ try: maxsize = sys.maxsize # not available in Python 2.5
+ except: maxsize = sys.maxint # not available in Python 3
+ executable_architecture= '64bit' if maxsize > 2 ** 32 else '32bit'
+ # we must catch the case of 32-bit Python running on 64-bit machine, so we're interested
+ # in this more than the underlying platform.machine(). And for some reason the official
+ # python.org docs recommend using sys.maxsize in this way rather than using
+ # platform.architecture()[0]
+
+ if not machine or machine in [ 'i386', 'x86_64', 'amd64' ]:
+ #arch = executable_architecture
+ arch = 'i386' if executable_architecture.startswith( '32' ) else 'x86_64'
+ else:
+ arch = machine
+
+ if uname.lower().startswith( 'win' ): dllxtn = '.dll'
+ elif uname.lower().startswith( 'darwin' ): dllxtn = '.dylib'
+ else: dllxtn = '.so'
+ dllname = 'libDSI-' + uname + '-' + arch + dllxtn
+
+ headername = 'DSI.h'
+
+ global dllpath, headerpath
+ whereami = os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) )
+ dllpath = ctypes.util.find_library( dllname ) # try the usual places first: current working dir, then $DYLD_LIBRARY_PATH and friends (posix) or %PATH% (Windows)
+ if dllpath == None: dllpath = os.path.join( whereami, dllname ) # if failed, try right next to this .py file
+ if not os.path.isfile( dllpath ): dllpath = None
+ if dllpath == None: raise OSError( "failed to find dynamic library " + dllname )
+ dllpath = os.path.abspath( dllpath )
+ whereisdll = os.path.dirname( dllpath )
+ dll = ctypes.CDLL( dllpath )
+ headerpath = os.path.join( whereisdll, headername ) # expect to find header next to dynamic library, wherever it was
+ if not os.path.isfile( headerpath ): raise OSError( "failed to find header %s in directory %s" % ( headername, whereisdll ) )
+
+ prototypes = [ line.split( ' , ' ) for line in open( headerpath ).readlines() if line.strip().startswith( 'DSI_API_FUNC\x28' ) ]
+
+ ctypetypes = {
+ 'DSI_Headset' : ctypes.c_void_p,
+ 'DSI_Source' : ctypes.c_void_p,
+ 'DSI_Channel' : ctypes.c_void_p,
+ 'DSI_ProcessingStage' : ctypes.c_void_p,
+ 'void *' : ctypes.c_void_p,
+ 'const void *' : ctypes.c_void_p,
+ 'const char *' : ctypes.c_char_p,
+ 'size_t' : ctypes.c_size_t,
+ 'bool_t' : getattr( ctypes, 'c_bool', ctypes.c_int ),
+ 'int' : ctypes.c_int,
+ 'unsigned int' : ctypes.c_uint,
+ 'double' : ctypes.c_double,
+ 'void' : None,
+ 'DSI_MessageCallback' : MessageCallback,
+ 'DSI_SampleCallback' : SampleCallback,
+ 'DSI_SourceSelection' : ctypes.c_int,
+ }
+
+ classes = { 'DSI_Headset' : Headset, 'DSI_Source': Source, 'DSI_Channel': Channel, 'DSI_ProcessingStage': ProcessingStage }
+
+ def wrapfunction( funcptr, outputClass, doc ):
+ def function( *args ):
+ args = [ IfStringThenRawString( arg ) for arg in args ]
+ output = funcptr( *args )
+ err = dll.DSI_ClearError()
+ if err: raise( DSIException( IfStringThenNormalString( err ) ) )
+ if outputClass: output = outputClass( output )
+ return IfStringThenNormalString( output )
+ function.__doc__ = doc
+ return function
+
+ def wrapmethod( funcptr, outputClass, doc ):
+ def method( self, *args ):
+ args = [ IfStringThenRawString( arg ) for arg in args ]
+ output = funcptr( self.ptr, *args )
+ err = dll.DSI_ClearError()
+ if err: raise( DSIException( IfStringThenNormalString( err ) ) )
+ if outputClass: output = outputClass( output )
+ return IfStringThenNormalString( output )
+ method.__doc__ = doc
+ return method
+
+ globalFuncs = {}
+
+ def clean( s ): return re.sub( r'\/\*.*\*\/', '', s ).strip()
+
+ for prototype in prototypes:
+
+ restype = clean( prototype[ 0 ].split( ' ', 1 )[ 1 ] )
+ funcname = clean( prototype[ 1 ] )
+ args = clean( prototype[ 2 ] )
+ doc = restype + ' ' + funcname + args + ';'
+ __allprototypes__.append( doc )
+ args = args.strip( '()' ).split( ',' )
+ funcptr = getattr( dll, funcname )
+ funcptr.restype = ctypetypes[ restype ]
+ outputClass = classes.get( restype, None )
+ for prefix, cls in classes.items():
+ if funcname.startswith( prefix + '_' ):
+ methodname = funcname[ len( prefix ) + 1 : ]
+ setattr( cls, methodname, wrapmethod( funcptr, outputClass, doc ) )
+ break
+ else:
+ if funcname.startswith( 'DSI_' ): funcname = funcname[ 4 : ]
+ globalFuncs[ funcname ] = wrapfunction( funcptr, outputClass, doc )
+ args = [ arg.strip().rsplit( ' ', 1 ) for arg in args ]
+ if args != [ [ 'void' ] ]: funcptr.argtypes = tuple( [ ctypetypes[ arg[ 0 ].strip() ] for arg in args ] )
+ return dll, globalFuncs
+
# Perform the actual API load at import time, then expose every non-method
# DSI_* function at module level and advertise it via __all__.
dll, globalFuncs = LoadAPI()
locals().update( globalFuncs ) # at module scope, locals() is the module namespace
__all__ += globalFuncs.keys()
del globalFuncs
del LoadAPI
del Headset.New # only Headset.__init__() should be calling DSI_Headset_New()
del Headset.Delete # only Headset.__del__() should be calling DSI_Headset_Delete()
del os, sys, ctypes # keep the namespace clean for `from ... import *`
+
+
+#########################################################################################
+##### Example code ######################################################################
+#########################################################################################
+
+# import sys
+#
+# array = np.array([,])
+# t = []
+#
+# @MessageCallback
+# def ExampleMessageCallback( msg, lvl=0 ):
+# if lvl <= 3: # ignore messages at debugging levels higher than 3
+# print( "DSI Message (level %d): %s" % ( lvl, IfStringThenNormalString( msg ) ) )
+# return 1
+#
+# @SampleCallback
+# def ExampleSampleCallback_Signals( headsetPtr, packetTime, userData ):
+# global array, t
+# h = Headset( headsetPtr )
+# string = np.array([ '%+08.2f' % ( ch.GetSignal() ) for ch in h.Channels() ])
+# strings = string.reshape(1,24)
+# #values = [ch.ReadBuffered() for ch in h.Channels() ]
+# array = np.concatenate([array, strings],axis=0)
+# t = t + [('%8.3f' % packetTime)]
+#
+# print(array.shape)
+# print(len(t))
+#
+#
+# sys.stdout.flush()
+#
+# @SampleCallback
+# def ExampleSampleCallback_Impedances( headsetPtr, packetTime, userData ):
+# h = Headset( headsetPtr )
+# fmt = '%s = %5.3f'
+# strings = [ fmt % ( IfStringThenNormalString( src.GetName() ), src.GetImpedanceEEG() ) for src in h.Sources() if src.IsReferentialEEG() and not src.IsFactoryReference() ]
+# strings.append( fmt % ( 'CMF @ ' + h.GetFactoryReferenceString(), h.GetImpedanceCMF() ) )
+# print( ( '%8.3f: ' % packetTime ) + ', '.join( strings ) )
+# sys.stdout.flush()
+#
+# def Test( port, arg='' ):
+# """
+# `arg` is either a specification of the desired reference, or the
+# string "impedances"
+# """
+#
+# h = Headset() # if we did not want to change the callbacks first, we could simply say h = Headset( port )
+# h.SetMessageCallback( ExampleMessageCallback ) # could set this to NullMessageCallback instead if we wanted to shut it up
+# h.Connect( port )
+# if arg.lower().startswith( 'imp' ):
+# h.SetSampleCallback( ExampleSampleCallback_Impedances, 0 )
+# h.StartImpedanceDriver()
+# else:
+# h.SetSampleCallback( ExampleSampleCallback_Signals, 0 )
+# if len( arg.strip() ): h.SetDefaultReference( arg, True )
+# #h.Receive( 2.0, 2.0 )
+# h.StartDataAcquisition()
+# h.StopDataAcquisition()
+# # calls StartDataAcquisition(), then Idle() for 2 seconds, then StopDataAcquisition(), then Idle() for 2 seconds
+# # NB: your application will probably want to use Idle( seconds ) in its main loop instead of Receive()
+#
+# if __name__ == '__main__':
+# args = getattr( sys, 'argv', [ '' ] )
+# if sys.platform.lower().startswith( 'win' ): default_port = 'COM3'
+# else: default_port = '/dev/cu.DSI7-0009.BluetoothSeri'
+#
+# # first command-line argument: serial port address
+# if len( args ) > 1: port = args[ 1 ]
+# else: port = default_port
+#
+# # second command-line argument: name of the Source to be used as reference, or the word 'impedances'
+# if len( args ) > 2: ref = args[ 2 ]
+# else: ref = ''
+#
+# Test( port, ref )
diff --git a/physiolabxr/thirdparty/WearableSensing/DSI_API_Loader.c b/physiolabxr/thirdparty/WearableSensing/DSI_API_Loader.c
new file mode 100644
index 00000000..76f52492
--- /dev/null
+++ b/physiolabxr/thirdparty/WearableSensing/DSI_API_Loader.c
@@ -0,0 +1,49 @@
+/*
+
+# This file is part of the Application Programmer's Interface (API) for Dry Sensor Interface
+# (DSI) EEG systems by Wearable Sensing. The API consists of code, headers, dynamic libraries
+# and documentation. The API allows software developers to interface directly with DSI
+# systems to control and to acquire data from them.
+#
+# The API is not certified to any specific standard. It is not intended for clinical use.
+# The API, and software that makes use of it, should not be used for diagnostic or other
+# clinical purposes. The API is intended for research use and is provided on an "AS IS"
+# basis. WEARABLE SENSING, INCLUDING ITS SUBSIDIARIES, DISCLAIMS ANY AND ALL WARRANTIES
+# EXPRESSED, STATUTORY OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT OR THIRD PARTY RIGHTS.
+#
+# (c) @YEARS@ Wearable Sensing LLC
+
+Add this file to your C or C++ project. Call Load_DSI_API() and check that its return
+value is 0 before proceeding to call any DSI_... functions. See README.txt for more details.
+
+*/
+
+#include "DSI.h"
+
+const char * DSI_Stub( void ) { static char msg[] = "DSI API is not loaded"; return msg; }
+#define DSI_API_FUNC( type, name, args, implementation ) type ( *name ) args = ( type ( * ) args )DSI_Stub;
+#include "DSI.h"
+
+#ifdef _WIN32
+#include
+#else
+#include
+#define HINSTANCE void *
+#define LoadLibrary( a ) dlopen( a, RTLD_NOW | RTLD_GLOBAL )
+#define GetProcAddress( a, b ) dlsym( ( a ), ( b ) )
+#endif /* _WIN32 */
+
+
+int Load_DSI_API( const char *dllname )
+{
+ int failures = 0;
+ HINSTANCE dll = LoadLibrary( DSI_DYLIB_NAME( dllname ) );
+ if( !dll ) return -1;
+
+#define SUPPRESS_EXTERNC
+#define DSI_API_FUNC( type, name, args, implementation ) \
+ if ( ( name = ( type ( * ) args )GetProcAddress( dll , #name ) ) == 0 ) failures++;
+#include "DSI.h"
+ return failures;
+}
diff --git a/physiolabxr/thirdparty/WearableSensing/DSI_py3.py b/physiolabxr/thirdparty/WearableSensing/DSI_py3.py
new file mode 100644
index 00000000..511e6fc7
--- /dev/null
+++ b/physiolabxr/thirdparty/WearableSensing/DSI_py3.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+"""
+This is a Python interface to the Dry Sensor Interface (DSI) headset by Wearable Sensing
+LLC. It uses the DSI API, loaded from the libDSI dynamic library via ctypes. The dynamic
+library must be in the same directory as this Python file. Function prototypes are parsed
+automatically at import time from DSI.h, so DSI.h must also be in the same directory.
+
+Most of the C functions are reinterpreted as object methods: this module defines
+classes Headset, Source and Channel to wrap them, and adds two helper methods:
+Headset.Sources() and Headset.Channels(). It also defines various global functions,
+and the decorators SampleCallback and MessageCallback. Examples of how to use the
+decorators, and a minimal Test() function, are provided at the bottom of this file.
+
+Normal usage would be to import this file and use the classes and functions the module
+provides. As a quick test, the Test() function can be run by executing this file directly,
+with the serial port address as the first command-line argument, and (optionally) the
+reference Source name or the word 'impedances' as the second.
+
+The Python source file also contains copyright and disclaimer information.
+"""
+
+# This file is part of the Application Programmer's Interface (API) for Dry Sensor Interface
+# (DSI) EEG systems by Wearable Sensing. The API consists of code, headers, dynamic libraries
+# and documentation. The API allows software developers to interface directly with DSI
+# systems to control and to acquire data from them.
+#
+# The API is not certified to any specific standard. It is not intended for clinical use.
+# The API, and software that makes use of it, should not be used for diagnostic or other
+# clinical purposes. The API is intended for research use and is provided on an "AS IS"
+# basis. WEARABLE SENSING, INCLUDING ITS SUBSIDIARIES, DISCLAIMS ANY AND ALL WARRANTIES
+# EXPRESSED, STATUTORY OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT OR THIRD PARTY RIGHTS.
+#
+# (c) @YEARS@ Wearable Sensing LLC
+
+
+# TODO: enum
+
+__all__ = [
+ 'Headset', 'Source', 'Channel',
+ 'SampleCallback', 'MessageCallback',
+ 'DSIException',
+ 'IfStringThenRawString', 'IfStringThenNormalString',
+]
+# global DSI_* functions from the dylib will be appended to this
+import numpy as np
+import os, sys, ctypes
+if sys.version >= '3': unicode = str; basestring = ( bytes, str ) # bytes is already defined, unicode is not
+else: bytes = str # unicode is already defined, bytes is not
def IfStringThenRawString( x ):
    """
    Return the raw-bytes (utf-8-encoded) form of *x* if it is a text string;
    otherwise return *x* unchanged.

    A string may arrive either as raw bytes or as unicode, and which of those
    a simple quoted literal produces depends on the Python version.
    Normalizing to bytes lets the value be sent over a serial port, passed
    via ctypes to a C function, etc.  Non-strings pass straight through, so a
    whole argument list can be filtered agnostically.

    See also IfStringThenNormalString()
    """
    return x.encode( 'utf-8' ) if isinstance( x, str ) else x
def IfStringThenNormalString( x ):
    """
    Return the "normal" (native str) form of *x* if it is a string;
    otherwise return *x* unchanged.

    Depending on Python version, either raw bytes or unicode counts as the
    "normal" string type (the one a plain quoted literal produces and that
    print() renders without adornment).  This converts utf-8 bytes to that
    type so the value can be printed, formatted, raised in an Exception, etc.
    Non-strings pass straight through, so a whole argument list can be
    filtered agnostically.

    See also IfStringThenRawString()
    """
    if isinstance( x, bytes ) and str is not bytes:
        return x.decode( 'utf-8' )
    return x
+
+
class Headset:
    """
    Object-oriented wrapper around an opaque DSI_Headset pointer.  Most
    methods are attached dynamically at import time by LoadAPI(), one per
    DSI_Headset_* function found in DSI.h.
    """
    def __init__( self, arg=None ):
        # `arg` may be an existing C-level headset pointer (e.g. the pointer
        # delivered to a SampleCallback), or a port-specifier string / None,
        # in which case a new headset is created and this wrapper owns it.
        self.ptr = arg
        self.__needs_cleanup = False
        if arg is None or isinstance( arg, basestring ): # treat arg as port specifier string rather than a pointer
            self.ptr = dll.DSI_Headset_New(IfStringThenRawString( arg ))
            self.__needs_cleanup = True
    def __del__( self ):
        # free the C-level headset only if this wrapper created it
        if self.__needs_cleanup:
            try: dll.DSI_Headset_Delete( self.ptr )
            except: pass # `dll` may already be gone during interpreter shutdown
        else:
            self.__needs_cleanup = False
    # convenience helpers over the index-based C accessors
    def Sources( self ): return [ self.GetSourceByIndex( i ) for i in range( self.GetNumberOfSources() ) ]
    def Channels( self ): return [ self.GetChannelByIndex( i ) for i in range( self.GetNumberOfChannels() ) ]
class Source:
    """Wrapper around an opaque DSI_Source pointer; methods are attached by LoadAPI()."""
    def __init__( self, ptr ):
        self.ptr = ptr
+
class Channel:
    """Wrapper around an opaque DSI_Channel pointer; methods are attached by LoadAPI()."""
    def __init__( self, ptr ):
        self.ptr = ptr
+
class ProcessingStage:
    """Wrapper around an opaque DSI_ProcessingStage pointer; methods are attached by LoadAPI()."""
    def __init__( self, ptr ):
        self.ptr = ptr
+
class DSIException( Exception ): pass # raised whenever the C API reports an error via DSI_ClearError()
+
+
# ctypes signatures for the two callback types the C API accepts:
# a sample callback receives (headset pointer, packet time in seconds, user data);
# a message callback receives (message, debug level) and returns an int.
SampleCallback = ctypes.CFUNCTYPE( None, ctypes.c_void_p, ctypes.c_double, ctypes.c_void_p )
MessageCallback = ctypes.CFUNCTYPE( ctypes.c_int, ctypes.c_char_p, ctypes.c_int )

@MessageCallback
def NullMessageCallback( msg, lvl=0 ): return 1 # swallow all API log messages

@SampleCallback
def NullSampleCallback( headsetPtr, packetTime, userData ): pass # ignore incoming samples
+
__allprototypes__ = [] # C prototype strings parsed from DSI.h, one per API function
def LoadAPI( dllname = None ):
    """
    Load the libDSI dynamic library and parse DSI.h to discover its API.

    For each DSI_API_FUNC prototype found in the header, the corresponding
    ctypes function pointer is configured (restype/argtypes) and wrapped:
    DSI_Headset_* / DSI_Source_* / DSI_Channel_* / DSI_ProcessingStage_*
    functions become methods on the matching wrapper class, and the rest
    are collected into a dict of module-level functions.

    If `dllname` is None, the library filename is inferred from the platform,
    CPU architecture and pointer size of the running interpreter.

    Returns the ( ctypes.CDLL, dict-of-global-functions ) pair.
    Raises OSError if the dynamic library or DSI.h cannot be found.
    """
    import platform, re, inspect, ctypes.util

    if dllname is None:
        uname = platform.system()
        machine = platform.machine().lower()
        if machine.startswith( 'armv' ): machine = machine.rstrip( 'l' ) # e.g. 'armv7l' -> 'armv7'
        try: maxsize = sys.maxsize # not available in Python 2.5
        except: maxsize = sys.maxint # not available in Python 3
        executable_architecture= '64bit' if maxsize > 2 ** 32 else '32bit'
        # we must catch the case of 32-bit Python running on 64-bit machine, so we're interested
        # in this more than the underlying platform.machine(). And for some reason the official
        # python.org docs recommend using sys.maxsize in this way rather than using
        # platform.architecture()[0]

        if not machine or machine in [ 'i386', 'x86_64', 'amd64' ]:
            #arch = executable_architecture
            arch = 'i386' if executable_architecture.startswith( '32' ) else 'x86_64'
        else:
            arch = machine

        if uname.lower().startswith( 'win' ): dllxtn = '.dll'
        elif uname.lower().startswith( 'darwin' ): dllxtn = '.dylib'
        else: dllxtn = '.so'
        dllname = 'libDSI-' + uname + '-' + arch + dllxtn

    headername = 'DSI.h'

    global dllpath, headerpath
    whereami = os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) )
    dllpath = ctypes.util.find_library( dllname ) # try the usual places first: current working dir, then $DYLD_LIBRARY_PATH and friends (posix) or %PATH% (Windows)
    if dllpath is None: dllpath = os.path.join( whereami, dllname ) # if failed, try right next to this .py file
    if not os.path.isfile( dllpath ): dllpath = None
    if dllpath is None: raise OSError( "failed to find dynamic library " + dllname )
    dllpath = os.path.abspath( dllpath )
    whereisdll = os.path.dirname( dllpath )
    dll = ctypes.CDLL( dllpath )
    headerpath = os.path.join( whereisdll, headername ) # expect to find header next to dynamic library, wherever it was
    if not os.path.isfile( headerpath ): raise OSError( "failed to find header %s in directory %s" % ( headername, whereisdll ) )

    # each API prototype in DSI.h is one line of the form
    # DSI_API_FUNC( restype , name , args , ... ) -- split on ' , '
    prototypes = [ line.split( ' , ' ) for line in open( headerpath ).readlines() if line.strip().startswith( 'DSI_API_FUNC\x28' ) ]

    # map C type names, as spelled in DSI.h, to their ctypes equivalents
    ctypetypes = {
        'DSI_Headset' : ctypes.c_void_p,
        'DSI_Source' : ctypes.c_void_p,
        'DSI_Channel' : ctypes.c_void_p,
        'DSI_ProcessingStage' : ctypes.c_void_p,
        'void *' : ctypes.c_void_p,
        'const void *' : ctypes.c_void_p,
        'const char *' : ctypes.c_char_p,
        'size_t' : ctypes.c_size_t,
        'bool_t' : getattr( ctypes, 'c_bool', ctypes.c_int ), # c_bool is missing from very old ctypes
        'int' : ctypes.c_int,
        'unsigned int' : ctypes.c_uint,
        'double' : ctypes.c_double,
        'void' : None,
        'DSI_MessageCallback' : MessageCallback,
        'DSI_SampleCallback' : SampleCallback,
        'DSI_SourceSelection' : ctypes.c_int,
    }

    # opaque C handle types and the Python wrapper classes that adopt their methods
    classes = { 'DSI_Headset' : Headset, 'DSI_Source': Source, 'DSI_Channel': Channel, 'DSI_ProcessingStage': ProcessingStage }

    def wrapfunction( funcptr, outputClass, doc ):
        # wrap a C function as a plain Python function: normalize string args
        # to bytes, translate pending API errors into DSIException, and wrap
        # opaque handle results in their Python class
        def function( *args ):
            args = [ IfStringThenRawString( arg ) for arg in args ]
            output = funcptr( *args )
            err = dll.DSI_ClearError()
            if err: raise( DSIException( IfStringThenNormalString( err ) ) )
            if outputClass: output = outputClass( output )
            return IfStringThenNormalString( output )
        function.__doc__ = doc
        return function

    def wrapmethod( funcptr, outputClass, doc ):
        # same as wrapfunction, but passes self.ptr as the first C argument
        def method( self, *args ):
            args = [ IfStringThenRawString( arg ) for arg in args ]
            output = funcptr( self.ptr, *args )
            err = dll.DSI_ClearError()
            if err: raise( DSIException( IfStringThenNormalString( err ) ) )
            if outputClass: output = outputClass( output )
            return IfStringThenNormalString( output )
        method.__doc__ = doc
        return method

    globalFuncs = {}

    def clean( s ): return re.sub( r'\/\*.*\*\/', '', s ).strip() # strip /* ... */ comments

    for prototype in prototypes:

        restype = clean( prototype[ 0 ].split( ' ', 1 )[ 1 ] ) # drop the leading 'DSI_API_FUNC(' token
        funcname = clean( prototype[ 1 ] )
        args = clean( prototype[ 2 ] )
        doc = restype + ' ' + funcname + args + ';'
        __allprototypes__.append( doc )
        args = args.strip( '()' ).split( ',' )
        funcptr = getattr( dll, funcname )
        funcptr.restype = ctypetypes[ restype ]
        outputClass = classes.get( restype, None )
        for prefix, cls in classes.items():
            if funcname.startswith( prefix + '_' ):
                methodname = funcname[ len( prefix ) + 1 : ]
                setattr( cls, methodname, wrapmethod( funcptr, outputClass, doc ) )
                break
        else: # not a method of any wrapper class: expose as a global function, minus the DSI_ prefix
            if funcname.startswith( 'DSI_' ): funcname = funcname[ 4 : ]
            globalFuncs[ funcname ] = wrapfunction( funcptr, outputClass, doc )
        args = [ arg.strip().rsplit( ' ', 1 ) for arg in args ]
        if args != [ [ 'void' ] ]: funcptr.argtypes = tuple( [ ctypetypes[ arg[ 0 ].strip() ] for arg in args ] )
    return dll, globalFuncs
+
# Perform the actual API load at import time, then expose every non-method
# DSI_* function at module level and advertise it via __all__.
dll, globalFuncs = LoadAPI()
locals().update( globalFuncs ) # at module scope, locals() is the module namespace
__all__ += globalFuncs.keys()
del globalFuncs
del LoadAPI
del Headset.New # only Headset.__init__() should be calling DSI_Headset_New()
del Headset.Delete # only Headset.__del__() should be calling DSI_Headset_Delete()
del os, sys, ctypes # keep the namespace clean for `from ... import *`
+
+
+#########################################################################################
+##### Example code ######################################################################
+#########################################################################################
+
+import sys
+
@MessageCallback
def ExampleMessageCallback( msg, lvl=0 ):
    """Print API log messages of debugging level 3 or below; always report handled (1)."""
    if lvl <= 3: # ignore messages at debugging levels higher than 3
        print( "DSI Message (level %d): %s" % ( lvl, IfStringThenNormalString( msg ) ) )
    return 1
+
@SampleCallback
def ExampleSampleCallback_Signals( headsetPtr, packetTime, userData ):
    # Called by the API once per sample packet: print every channel's
    # buffered signal value, prefixed with the packet time in seconds.
    h = Headset( headsetPtr ) # wrap the raw pointer; wrapper does not own it
    strings = [ '%s=%+08.2f \n' % ( IfStringThenNormalString( ch.GetName() ), ch.ReadBuffered() ) for ch in h.Channels() ]
    print( ( '%8.3f: \n' % packetTime ) + ', '.join( strings ) )
    sys.stdout.flush()
+
@SampleCallback
def ExampleSampleCallback_Impedances( headsetPtr, packetTime, userData ):
    # Called by the API once per packet while the impedance driver runs:
    # prints the names of the referential EEG sources alongside their indices.
    h = Headset( headsetPtr )
    #fmt = '%s = %5.3f'
    fmt = '%s'
    #strings = [ fmt % ( IfStringThenNormalString( src.GetName() ), src.GetImpedanceEEG() ) for src in h.Sources() if src.IsReferentialEEG() and not src.IsFactoryReference() ]
    strings = np.array([ fmt % (IfStringThenNormalString( src.GetName())) for src in h.Sources() if src.IsReferentialEEG() and not src.IsFactoryReference() ])
    # assumes exactly 20 referential, non-factory-reference sources -- TODO confirm for headsets other than DSI-24
    strings = strings.reshape(20,1)
    # NOTE(review): `new_strings` is never used below; its index list repeats 9
    # and 16 and omits 2 and 7 -- verify the intended electrode ordering before relying on it
    new_strings = strings[[10, 11, 5, 4, 6, 16, 17, 9, 3, 8, 12, 19, 13, 18, 14, 0, 9, 15, 16, 1],:]
    #strings.append( fmt % ( 'CMF @ ' + h.GetFactoryReferenceString(), h.GetImpedanceCMF() ) )
    #print( ( '%8.3f: ' % packetTime ) + ', '.join( strings ) )
    # print each source name next to its index in the enumeration order
    allstrings = np.append(strings, np.array(range(len(strings))).reshape(len(strings),1), axis=1)
    print(allstrings)
    sys.stdout.flush()
+
def Test( port, arg='' ):
    """
    Connect to a headset on serial port `port` and stream for about a second.

    `arg` is either a specification of the desired reference, or the
    string "impedances"
    """
    h = Headset() # if we did not want to change the callbacks first, we could simply say h = Headset( port )
    h.SetMessageCallback( ExampleMessageCallback ) # could set this to NullMessageCallback instead if we wanted to shut it up
    h.Connect( port )
    if arg.lower().startswith( 'imp' ):
        print('impedance mode')
        h.SetSampleCallback( ExampleSampleCallback_Impedances, 0 )
        h.StartImpedanceDriver()
    else:
        print('normal mode')
        h.SetSampleCallback( ExampleSampleCallback_Signals, 0 )
        if len( arg.strip() ): h.SetDefaultReference( arg, True )

    # NB: the original example used h.Receive( 2.0, 2.0 ) here, which wraps
    # Start/Idle/Stop; your application will probably want to call
    # Idle( seconds ) in its main loop instead
    h.StartDataAcquisition()
    h.GetInfoString()
    h.Idle(1) # let samples stream (and callbacks fire) for ~1 second
    h.StopDataAcquisition()
+
if __name__ == '__main__':
    # Command-line entry point:  python DSI_py3.py [PORT [REFERENCE_OR_'impedances']]
    args = getattr( sys, 'argv', [ '' ] )
    if sys.platform.lower().startswith( 'win' ): default_port = 'COM5'
    else: default_port = '/dev/cu.DSI7-0009.BluetoothSeries'

    # first command-line argument: serial port address
    if len( args ) > 1: port = args[ 1 ]
    else: port = default_port

    # second command-line argument: name of the Source to be used as reference, or the word 'impedances'
    if len( args ) > 2: ref = args[ 2 ]
    else: ref = ''

    # bug fix: `ref` was parsed above but then discarded -- Test was called
    # with a hard-coded '' -- so the second argument had no effect
    Test( port, ref )
diff --git a/physiolabxr/thirdparty/WearableSensing/__init__.py b/physiolabxr/thirdparty/WearableSensing/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-arm64.dylib b/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-arm64.dylib
new file mode 100644
index 00000000..09ce3058
Binary files /dev/null and b/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-arm64.dylib differ
diff --git a/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-x86_64.dylib b/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-x86_64.dylib
new file mode 100644
index 00000000..21d71000
Binary files /dev/null and b/physiolabxr/thirdparty/WearableSensing/libDSI-Darwin-x86_64.dylib differ
diff --git a/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-i386.dll b/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-i386.dll
new file mode 100644
index 00000000..8870f225
Binary files /dev/null and b/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-i386.dll differ
diff --git a/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-x86_64.dll b/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-x86_64.dll
new file mode 100644
index 00000000..c9eb2b34
Binary files /dev/null and b/physiolabxr/thirdparty/WearableSensing/libDSI-Windows-x86_64.dll differ
diff --git a/tmpGrpcTools/Program.cs b/tmpGrpcTools/Program.cs
new file mode 100644
index 00000000..3751555c
--- /dev/null
+++ b/tmpGrpcTools/Program.cs
@@ -0,0 +1,2 @@
+// See https://aka.ms/new-console-template for more information
+Console.WriteLine("Hello, World!");
diff --git a/tmpGrpcTools/obj/project.assets.json b/tmpGrpcTools/obj/project.assets.json
new file mode 100644
index 00000000..4b2b1c8a
--- /dev/null
+++ b/tmpGrpcTools/obj/project.assets.json
@@ -0,0 +1,139 @@
+{
+ "version": 3,
+ "targets": {
+ "net8.0": {
+ "Grpc.Tools/2.66.0": {
+ "type": "package",
+ "build": {
+ "build/Grpc.Tools.props": {},
+ "build/Grpc.Tools.targets": {}
+ }
+ }
+ }
+ },
+ "libraries": {
+ "Grpc.Tools/2.66.0": {
+ "sha512": "URTltx2E0aTGQqVW+H09AZiGR/qyQ3naHPymSG1/Ytgm8bPdjSltnsIFanbxb7rXxdYwGzLFGy82R82M1UktXg==",
+ "type": "package",
+ "path": "grpc.tools/2.66.0",
+ "hasTools": true,
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "README.md",
+ "build/Grpc.Tools.props",
+ "build/Grpc.Tools.targets",
+ "build/_grpc/Grpc.CSharp.xml",
+ "build/_grpc/_Grpc.Tools.props",
+ "build/_grpc/_Grpc.Tools.targets",
+ "build/_protobuf/Google.Protobuf.Tools.props",
+ "build/_protobuf/Google.Protobuf.Tools.targets",
+ "build/_protobuf/Protobuf.CSharp.xml",
+ "build/_protobuf/net45/Protobuf.MSBuild.dll",
+ "build/_protobuf/net45/Protobuf.MSBuild.pdb",
+ "build/_protobuf/netstandard1.3/Protobuf.MSBuild.dll",
+ "build/_protobuf/netstandard1.3/Protobuf.MSBuild.pdb",
+ "build/native/include/google/protobuf/any.proto",
+ "build/native/include/google/protobuf/api.proto",
+ "build/native/include/google/protobuf/descriptor.proto",
+ "build/native/include/google/protobuf/duration.proto",
+ "build/native/include/google/protobuf/empty.proto",
+ "build/native/include/google/protobuf/field_mask.proto",
+ "build/native/include/google/protobuf/source_context.proto",
+ "build/native/include/google/protobuf/struct.proto",
+ "build/native/include/google/protobuf/timestamp.proto",
+ "build/native/include/google/protobuf/type.proto",
+ "build/native/include/google/protobuf/wrappers.proto",
+ "grpc.tools.2.66.0.nupkg.sha512",
+ "grpc.tools.nuspec",
+ "packageIcon.png",
+ "tools/linux_arm64/grpc_csharp_plugin",
+ "tools/linux_arm64/protoc",
+ "tools/linux_x64/grpc_csharp_plugin",
+ "tools/linux_x64/protoc",
+ "tools/linux_x86/grpc_csharp_plugin",
+ "tools/linux_x86/protoc",
+ "tools/macosx_x64/grpc_csharp_plugin",
+ "tools/macosx_x64/protoc",
+ "tools/windows_x64/grpc_csharp_plugin.exe",
+ "tools/windows_x64/protoc.exe",
+ "tools/windows_x86/grpc_csharp_plugin.exe",
+ "tools/windows_x86/protoc.exe"
+ ]
+ }
+ },
+ "projectFileDependencyGroups": {
+ "net8.0": [
+ "Grpc.Tools >= 2.66.0"
+ ]
+ },
+ "packageFolders": {
+ "C:\\Users\\tlupe\\.nuget\\packages\\": {}
+ },
+ "project": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj",
+ "projectName": "tmpGrpcTools",
+ "projectPath": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj",
+ "packagesPath": "C:\\Users\\tlupe\\.nuget\\packages\\",
+ "outputPath": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\obj\\",
+ "projectStyle": "PackageReference",
+ "configFilePaths": [
+ "C:\\Users\\tlupe\\AppData\\Roaming\\NuGet\\NuGet.Config"
+ ],
+ "originalTargetFrameworks": [
+ "net8.0"
+ ],
+ "sources": {
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "projectReferences": {}
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "direct"
+ }
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "dependencies": {
+ "Grpc.Tools": {
+ "include": "Runtime, Build, Native, ContentFiles, Analyzers, BuildTransitive",
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.66.0, )"
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "frameworkReferences": {
+ "Microsoft.NETCore.App": {
+ "privateAssets": "all"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\8.0.402/PortableRuntimeIdentifierGraph.json"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tmpGrpcTools/obj/project.nuget.cache b/tmpGrpcTools/obj/project.nuget.cache
new file mode 100644
index 00000000..0851cd28
--- /dev/null
+++ b/tmpGrpcTools/obj/project.nuget.cache
@@ -0,0 +1,10 @@
+{
+ "version": 2,
+ "dgSpecHash": "r5py4xa+ux0=",
+ "success": true,
+ "projectFilePath": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj",
+ "expectedPackageFiles": [
+ "C:\\Users\\tlupe\\.nuget\\packages\\grpc.tools\\2.66.0\\grpc.tools.2.66.0.nupkg.sha512"
+ ],
+ "logs": []
+}
\ No newline at end of file
diff --git a/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.dgspec.json b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.dgspec.json
new file mode 100644
index 00000000..a7f6f4c6
--- /dev/null
+++ b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.dgspec.json
@@ -0,0 +1,72 @@
+{
+ "format": 1,
+ "restore": {
+ "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj": {}
+ },
+ "projects": {
+ "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj",
+ "projectName": "tmpGrpcTools",
+ "projectPath": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\tmpGrpcTools.csproj",
+ "packagesPath": "C:\\Users\\tlupe\\.nuget\\packages\\",
+ "outputPath": "D:\\School Stuff\\PhysioLabXR\\PhysioLabXR-Community\\tmpGrpcTools\\obj\\",
+ "projectStyle": "PackageReference",
+ "configFilePaths": [
+ "C:\\Users\\tlupe\\AppData\\Roaming\\NuGet\\NuGet.Config"
+ ],
+ "originalTargetFrameworks": [
+ "net8.0"
+ ],
+ "sources": {
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "projectReferences": {}
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "direct"
+ }
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "dependencies": {
+ "Grpc.Tools": {
+ "target": "Package",
+ "version": "[2.66.0, )"
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "frameworkReferences": {
+ "Microsoft.NETCore.App": {
+ "privateAssets": "all"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\8.0.402/PortableRuntimeIdentifierGraph.json"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.props b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.props
new file mode 100644
index 00000000..faaba399
--- /dev/null
+++ b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.props
@@ -0,0 +1,21 @@
+
+
+
+ True
+ NuGet
+ $(MSBuildThisFileDirectory)project.assets.json
+ $(UserProfile)\.nuget\packages\
+ C:\Users\tlupe\.nuget\packages\
+ PackageReference
+ 6.11.1
+
+
+
+
+
+
+
+
+ C:\Users\tlupe\.nuget\packages\grpc.tools\2.66.0
+
+
\ No newline at end of file
diff --git a/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.targets b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.targets
new file mode 100644
index 00000000..98e3b758
--- /dev/null
+++ b/tmpGrpcTools/obj/tmpGrpcTools.csproj.nuget.g.targets
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tmpGrpcTools/tmpGrpcTools.csproj b/tmpGrpcTools/tmpGrpcTools.csproj
new file mode 100644
index 00000000..038d3747
--- /dev/null
+++ b/tmpGrpcTools/tmpGrpcTools.csproj
@@ -0,0 +1,17 @@
+
+
+
+ Exe
+ net8.0
+ enable
+ enable
+
+
+
+
+ runtime; build; native; contentfiles; analyzers; buildtransitive
+ all
+
+
+
+