diff --git a/notebooks/helm/helm_audit.ipynb b/notebooks/helm/helm_audit.ipynb index 4c02c94f1a1..6fd969cb71a 100644 --- a/notebooks/helm/helm_audit.ipynb +++ b/notebooks/helm/helm_audit.ipynb @@ -20,56 +20,42 @@ "cell_type": "code", "execution_count": 2, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The input data will be loaded from ['short_input.jsonl']\n", - "Loading scenario data from /home/teo/helm/scripts/data_overlap/scenario_data\n", - "Initializing the stats, ngram_index, and ngram_counter {\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'philosophy'}), split='train')\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'philosophy'}), split='valid')\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'philosophy'}), split='test')\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'anatomy'}), split='train')\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'anatomy'}), split='valid')\n", - " Building ngram indexes for LightScenarioKey(scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', args={'subject': 'anatomy'}), split='test')\n", - "} [0.076s]\n", - "Computing overlap stats {\n", - "} [0.001s]\n", - "Written 18 results to /home/teo/helm/scripts/data_overlap/output_stats.jsonl\n" - ] - } - ], + "outputs": [], "source": [ - "import subprocess\n", - "helm_process = subprocess.run([\n", - " 'python', \n", - " '/home/teo/helm/scripts/data_overlap/compute_data_overlap_metrics.py',\n", - " '--scenario-data',\n", - " '/home/teo/helm/scripts/data_overlap/scenario_data',\n", - " '--input-data',\n", - " 'short_input.jsonl',\n", - " '--output-stats',\n", - " '/home/teo/helm/scripts/data_overlap/output_stats.jsonl',\n", - " '--input-format',\n", - " 'the_pile'\n", - " ])" + "# import subprocess\n", + "# helm_process = subprocess.run([\n", + "# 'python', \n", + "# '/home/teo/helm/scripts/data_overlap/compute_data_overlap_metrics.py',\n", + "# '--scenario-data',\n", + "# '/home/teo/helm/scripts/data_overlap/scenario_data',\n", + "# '--input-data',\n", + "# 'short_input.jsonl',\n", + "# '--output-stats',\n", + "# '/home/teo/helm/scripts/data_overlap/output_stats.jsonl',\n", + "# '--input-format',\n", + "# 'the_pile'\n", + "# ])" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /home/teo/OpenMined/PySyft\n" + ] + } + ], "source": [ "PART_INPUT: str = \"input\"\n", "PART_REF: str = \"references\"\n", "\n", "r = re.compile(r\"[\\s{}]+\".format(re.escape(punctuation)))\n", - "class hashabledict(dict):\n", - " def __hash__(self):\n", - " return hash(tuple(sorted(self.items())))\n", + "from syft.service.code.user_code import hashabledict\n", "\n", "def create_ngram_index(light_scenarios, n_values, stats_key_counts):\n", " ngram_index = 
hashabledict({n: hashabledict({}) for n in n_values})\n", @@ -129,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -137,14 +123,8 @@ "output_type": "stream", "text": [ "The input data will be loaded from ['short_input.jsonl']\n", - "Loading scenario data from /home/teo/helm/scripts/data_overlap/scenario_data\n", + "Loading scenario data from /home/teo/helm/scripts/data_overlap/scenario_data.jsonl\n", "[{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}, {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}, {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}, {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}, {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}, {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}]\n", - "dict_keys([])\n", - "dict_keys([])\n", - "dict_keys([{'light_scenario_key': {'scenario_spec': {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}, 'split': 'test'}, 'overlap_protocol_spec': 5}])\n", - "dict_keys([])\n", - "dict_keys([])\n", - "{'id328'} {'light_scenario_key': {'scenario_spec': {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}, 'split': 'test'}, 'overlap_protocol_spec': 5}\n", "Written 18 results to output2.jsonl\n" ] } @@ -152,7 +132,7 @@ "source": [ "\n", "input_data_path = \"short_input.jsonl\"\n", - "scenario_data_path = \"/home/teo/helm/scripts/data_overlap/scenario_data\"\n", + "scenario_data_path = \"/home/teo/helm/scripts/data_overlap/scenario_data.jsonl\"\n", "output_path = \"output2.jsonl\"\n", "normalization = \"default\"\n", "N = [5, 9, 13]\n", @@ -224,18 +204,12 @@ "total_reference_ids = defaultdict(set)\n", "\n", "for d in stats_key_to_input_ids:\n", - " print(d.keys())\n", - "\n", - "for d in stats_key_to_input_ids:\n", " for key in d:\n", " new_set = set()\n", " if key in total_input_ids:\n", " new_set = total_input_ids[key]\n", " new_set = new_set.union(d[key])\n", " total_input_ids[key] = new_set\n", - " \n", - "for d in total_input_ids:\n", - " print(total_input_ids[d], d)\n", "\n", "for d in stats_key_to_reference_ids:\n", " for key in d:\n", @@ -266,10 +240,72 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "import syft as sy " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{{'stats_key': {'light_scenario_key': {'scenario_spec': {'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario',\n", + " 'args': {'subject': 'philosophy'}},\n", + " 'split': 'train'},\n", + " 'overlap_protocol_spec': {'n': 5}},\n", + " 'instance_id': 'id0',\n", + " 'part': 'references'}}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list(list(ngram_index.values())[0].values())[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "dummy = {'key': \"value\"}\n", + "dummy = hashabledict({'key': \"value\"})" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + 
"metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "{}" + ], + "text/plain": [ + "{}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ser = sy.serialize(dummy, to_bytes=True)\n", + "msg = sy.deserialize(ser, from_bytes=True)\n", + "msg" + ] } ], "metadata": { diff --git a/notebooks/helm/helm_shorter.ipynb b/notebooks/helm/helm_shorter.ipynb index 8070672a314..3fe0d12f63a 100644 --- a/notebooks/helm/helm_shorter.ipynb +++ b/notebooks/helm/helm_shorter.ipynb @@ -4,7 +4,15 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /home/teo/OpenMined/PySyft\n" + ] + } + ], "source": [ "import syft as sy\n", "from syft.store.blob_storage import BlobStorageConfig, BlobStorageClientConfig\n", @@ -22,18 +30,18 @@ "name": "stdout", "output_type": "stream", "text": [ - "CREATING A PRODUCER ON tcp://127.0.0.1:49041\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A PRODUCER ON tcp://127.0.0.1:41691\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", - "CREATING A CONSUMER ON tcp://127.0.0.1:47299\n", + "CREATING A CONSUMER ON tcp://127.0.0.1:39839\n", "spawning thread\n", "Logged into as \n" ] @@ -158,22 +166,22 @@ " from collections import defaultdict\n", " import re\n", " from string import punctuation\n", - "\n", - " r = re.compile(r\"[\\s{}]+\".format(re.escape(punctuation)))\n", - " stats_key_to_input_ids = defaultdict(set)\n", - " stats_key_to_reference_ids = defaultdict(set)\n", - " document_tokens = r.split(document.lower())\n", - " for n in ngram_index.keys():\n", - " for document_ngram in ngrams(document_tokens, n):\n", - " if document_ngram in ngram_index[n]:\n", - " for entry_overlap_key in ngram_index[n][document_ngram]:\n", - " id = entry_overlap_key['instance_id']\n", - " part = entry_overlap_key['part']\n", - " if part == \"input\":\n", - " stats_key_to_input_ids[entry_overlap_key['stats_key']].add(id)\n", - " elif part == \"references\":\n", - " stats_key_to_reference_ids[entry_overlap_key['stats_key']].add(id)\n", - " return stats_key_to_input_ids, stats_key_to_reference_ids" + " return document\n", + " # r = re.compile(r\"[\\s{}]+\".format(re.escape(punctuation)))\n", + " # stats_key_to_input_ids = defaultdict(set)\n", + " # stats_key_to_reference_ids = defaultdict(set)\n", + " # document_tokens = r.split(document.lower())\n", + " # for n in ngram_index.keys():\n", + " # for document_ngram in ngrams(document_tokens, n):\n", + " # if document_ngram in ngram_index[n]:\n", + " # for entry_overlap_key in ngram_index[n][document_ngram]:\n", + " # id = entry_overlap_key['instance_id']\n", + " # part = entry_overlap_key['part']\n", + " # if part == \"input\":\n", + " # stats_key_to_input_ids[entry_overlap_key['stats_key']].add(id)\n", + " # elif part == \"references\":\n", 
+ " # stats_key_to_reference_ids[entry_overlap_key['stats_key']].add(id)\n", + " # return stats_key_to_input_ids, stats_key_to_reference_ids" ] }, { @@ -263,7 +271,7 @@ " )\n", " return ngram_index\n", "\n", - " # SETUP\n", + " # # SETUP\n", " light_scenarios = []\n", " light_scenario_jsons = scenario_file.iter_lines()\n", " for light_scenario_json in light_scenario_jsons:\n", @@ -303,58 +311,57 @@ " document=document,\n", " ngram_index=ngram_index,\n", " )\n", + " print(batch_job)\n", " jobs.append(batch_job)\n", " \n", " # AGGREGATION\n", " stats_key_to_input_ids = []\n", " stats_key_to_reference_ids = []\n", - " results = [x.wait().get() for x in jobs]\n", - " for ids, refs in results:\n", - " stats_key_to_input_ids.append(ids)\n", - " stats_key_to_reference_ids.append(refs)\n", + " tmp_results = [x.wait() for x in jobs]\n", + " for res in tmp_results:\n", + " print(res)\n", + " \n", + " results = [x.get() for x in tmp_results]\n", + " all_data_overlap_stats = len(results)\n", + " # for ids, refs in results:\n", + " # stats_key_to_input_ids.append(ids)\n", + " # stats_key_to_reference_ids.append(refs)\n", "\n", - " total_input_ids = defaultdict(set)\n", - " total_reference_ids = defaultdict(set)\n", + " # total_input_ids = defaultdict(set)\n", + " # total_reference_ids = defaultdict(set)\n", " \n", - " for d in stats_key_to_input_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_input_ids:\n", - " new_set = total_input_ids[key]\n", - " new_set = new_set.union(d[key])\n", - " total_input_ids[key] = new_set\n", + " # for d in stats_key_to_input_ids:\n", + " # for key in d:\n", + " # new_set = set()\n", + " # if key in total_input_ids:\n", + " # new_set = total_input_ids[key]\n", + " # new_set = new_set.union(d[key])\n", + " # total_input_ids[key] = new_set\n", "\n", - " for d in stats_key_to_reference_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_reference_ids:\n", - " new_set = total_reference_ids[key]\n", - " new_set = total_reference_ids[key].union(d[key])\n", - " total_reference_ids[key] = new_set\n", + " # for d in stats_key_to_reference_ids:\n", + " # for key in d:\n", + " # new_set = set()\n", + " # if key in total_reference_ids:\n", + " # new_set = total_reference_ids[key]\n", + " # new_set = total_reference_ids[key].union(d[key])\n", + " # total_reference_ids[key] = new_set\n", " \n", - " all_data_overlap_stats = []\n", - " for stats_key, count in stats_key_counts.items():\n", - " data_overlap_stats = {\n", - " 'data_overlap_stats_key': None,\n", - " 'num_instances': count,\n", - " 'instance_ids_with_overlapping_input': sorted(total_input_ids[stats_key]),\n", - " 'instance_ids_with_overlapping_reference': sorted(total_reference_ids[stats_key]),\n", - " }\n", - " stats_key['overlap_protocol_spec'] = hashabledict({'n': stats_key['overlap_protocol_spec']})\n", - " data_overlap_stats['data_overlap_stats_key'] = stats_key\n", - " all_data_overlap_stats.append(data_overlap_stats)\n", + " # all_data_overlap_stats = []\n", + " # for stats_key, count in stats_key_counts.items():\n", + " # data_overlap_stats = {\n", + " # 'data_overlap_stats_key': None,\n", + " # 'num_instances': count,\n", + " # 'instance_ids_with_overlapping_input': sorted(total_input_ids[stats_key]),\n", + " # 'instance_ids_with_overlapping_reference': sorted(total_reference_ids[stats_key]),\n", + " # }\n", + " # stats_key['overlap_protocol_spec'] = hashabledict({'n': stats_key['overlap_protocol_spec']})\n", + " # data_overlap_stats['data_overlap_stats_key'] = 
stats_key\n", + " # all_data_overlap_stats.append(data_overlap_stats)\n", "\n", "\n", " return all_data_overlap_stats" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "code", "execution_count": 11, @@ -370,10 +377,10 @@ { "data": { "text/html": [ - "
SyftSuccess: Request 3e11a65937e347378d3bf76e125b6706 changes applied

" + "
SyftSuccess: Request a1f5d2f13ae7415f82d7fddc61d949df changes applied

" ], "text/plain": [ - "SyftSuccess: Request 3e11a65937e347378d3bf76e125b6706 changes applied" + "SyftSuccess: Request a1f5d2f13ae7415f82d7fddc61d949df changes applied" ] }, "execution_count": 11, @@ -388,7 +395,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -397,7 +404,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -414,84 +421,105 @@ }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "job.subjobs[0].logs()" - ] - }, - { - "cell_type": "code", - "execution_count": 23, + "execution_count": 14, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "execute_byte_code failed 'SyftError' object has no attribute 'get'\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ + "PTR OK: True\n", + "PTR OK: True\n", + "PTR: 1f6ac2dc836240a6a2754de109dbad63\n", + "PTR OK: Err(\"Could not find item with uid 1f6ac2dc836240a6a2754de109dbad63, 'syft.service.code.user_code.user_func_main_function_47f8c8f3db3a30695a28e4a51e44916669ac3d111924cb614181c64b2c3b8323_31f98ee247cd22621c45db24f14ada2ce1cd22c511a538a50997062deecd7845..main_function.'\")\n", "Traceback (most recent call last):\n", - " File \"/home/teo/OpenMined/PySyft/packages/syft/src/syft/service/code/user_code.py\", line 1027, in execute_byte_code\n", + " File \"/home/teo/OpenMined/PySyft/packages/syft/src/syft/service/code/user_code.py\", line 1041, in execute_byte_code\n", " result = eval(evil_string, None, _locals) # nosec\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"\", line 1, in \n", - " File \"\", line 84, in user_func_main_function_47f8c8f3db3a30695a28e4a51e44916669ac3d111924cb614181c64b2c3b8323_73a77532d2895c4ee14f1769101a5c52e0159961a924f8b95832e8e15e21724e\n", - " File \"\", line 57, in main_function\n", - " File \"\", line 57, in \n", - "AttributeError: 'SyftError' object has no attribute 'get'\n", + " File \"\", line 64, in user_func_main_function_47f8c8f3db3a30695a28e4a51e44916669ac3d111924cb614181c64b2c3b8323_31f98ee247cd22621c45db24f14ada2ce1cd22c511a538a50997062deecd7845\n", + " File \"\", line 53, in main_function\n", + " File \"/home/teo/OpenMined/PySyft/packages/syft/src/syft/service/code/user_code.py\", line 919, in launch_job\n", + " kw2id[k] = ptr.id\n", + " ^^^^^^\n", + "AttributeError: 'Err' object has no attribute 'id'\n", "\n", - "execute_byte_code failed 'SyftError' object has no attribute 'get'\n" + "execute_byte_code failed 'Err' object has no attribute 'id'\n" ] }, { - "ename": "AttributeError", - "evalue": "'SyftError' object has no attribute 'get'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m/home/teo/OpenMined/PySyft/notebooks/helm/helm_shorter.ipynb Cell 17\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> 1\u001b[0m job\u001b[39m.\u001b[39;49mwait()\u001b[39m.\u001b[39;49mget()\n", - "\u001b[0;31mAttributeError\u001b[0m: 'SyftError' object has no attribute 'get'" + "name": "stderr", + "output_type": "stream", + "text": [ + "execute_byte_code failed 'Err' object has no attribute 'id'\n" ] + }, + { + "data": { + "text/html": [ + "
SyftError: Failed to run. 'Err' object has no attribute 'id'

" + ], + "text/plain": [ + "SyftError: Failed to run. 'Err' object has no attribute 'id'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "job.wait().get()" + "job.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "ngram_uid = sy.UID('975079dee9d24653a2ec652dd6ebddcc')\n", + "document_uid = sy.UID('f472daba27fb4966801697c702027586')" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{1}" + "'Could not find item with uid 975079dee9d24653a2ec652dd6ebddcc, \"975079dee9d24653a2ec652dd6ebddcc not in \"'" ] }, - "execution_count": 3, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "b.union(a)" + "client.api.services.action.get(uid=ngram_uid)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "---------------------------------------------------------------------------\n", + "SyftAttributeError\n", + "---------------------------------------------------------------------------\n", + "Exception: 'APIModule' api.action object has no submodule or method 'get_all', you may not have permission to access the module you are trying to access\n" + ] + } + ], + "source": [ + "client.api.services.action.get_all()" ] } ], diff --git a/packages/syft/src/syft/service/code/user_code.py b/packages/syft/src/syft/service/code/user_code.py index a0bd08359d5..fba5d4857d0 100644 --- a/packages/syft/src/syft/service/code/user_code.py +++ b/packages/syft/src/syft/service/code/user_code.py @@ -72,6 +72,7 @@ from ..user.user_roles import ServiceRole from .code_parse import GlobalsVisitor from .unparse import unparse +from ..job.job_stash import Job UserVerifyKeyPartitionKey = PartitionKey(key="user_verify_key", type_=SyftVerifyKey) CodeHashPartitionKey = PartitionKey(key="code_hash", type_=int) @@ -869,8 +870,8 @@ class UserCodeExecutionResult(SyftObject): @serializable() class hashabledict(dict): - def __hash__(self): - return hash(tuple(sorted(self.items()))) + def __hash__(self): + return hash(tuple(sorted(self.items()))) def execute_byte_code( code_item: UserCode, kwargs: Dict[str, Any], context: AuthedServiceContext @@ -907,7 +908,14 @@ def launch_job(self, func: UserCode, **kwargs): kw2id = {} for k, v in kwargs.items(): value = ActionObject.from_obj(v) - ptr = action_service.set(context, value).ok() + ptr = action_service.set(context, value) + original_print("PTR OK:", ptr.is_ok()) + ptr = ptr.ok() + if not isinstance(v, str): + # original_print("Value:", value) + original_print("PTR:", ptr.id) + ptr = action_service.get(context, ptr.id) + original_print("PTR OK:", ptr) kw2id[k] = ptr.id # create new usercode with permissions @@ -931,6 +939,8 @@ def launch_job(self, func: UserCode, **kwargs): credentials=user_service.admin_verify_key(), role=ServiceRole.ADMIN, ) + original_print("What:", request) + original_print("Args:", kw2id) res = request_service.apply(admin_context, request.id) if not isinstance(res, SyftSuccess): raise ValueError(res) @@ -985,6 +995,10 @@ def print(*args, sep=" ", end="\n"): def to_str(arg: Any) -> str: if isinstance(arg, bytes): return arg.decode('utf-8') + if isinstance(arg, Job): + return f"JOB: {arg.id}" + if isinstance(arg, SyftError): + return f"JOB: {arg.message}" return arg new_str = sep.join([to_str(arg) for arg in args]) 
+ end @@ -995,7 +1009,7 @@ def to_str(arg: Any) -> str: log_service.append( context=context, uid=context.job.log_id, new_str=new_str ) - return __builtin__.print("FUNCTION LOG:", *args, end=end, sep=sep) + return __builtin__.print("FUNCTION LOG:", *args, end=end, sep=sep, file=sys.stderr) else: print = original_print
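
The notebook changes above shard the HELM data-overlap computation into Syft jobs, and the core per-document step is an n-gram lookup against a prebuilt index whose keys are hashable dicts (the same hashabledict now imported from syft.service.code.user_code). The following is a minimal, Syft-free sketch of that step; the helper names mirror the notebook functions, but the sample stats key, index contents, and document are invented purely for illustration and are not part of the diff.

    # Sketch of the per-document overlap check, assuming a tiny hypothetical index.
    import re
    from collections import defaultdict
    from string import punctuation


    class hashabledict(dict):
        """Dict usable as a dict/set key, hashed by its sorted items (as in user_code.py)."""
        def __hash__(self):
            return hash(tuple(sorted(self.items())))


    # Same tokenizer the notebooks compile: split on runs of whitespace/punctuation.
    TOKENIZER = re.compile(r"[\s{}]+".format(re.escape(punctuation)))


    def ngrams(tokens, n):
        """Yield consecutive n-grams of a token list as tuples."""
        for i in range(len(tokens) - n + 1):
            yield tuple(tokens[i:i + n])


    def compute_document_data_overlap(document, ngram_index):
        """Collect instance ids whose input/reference n-grams occur in one training document."""
        stats_key_to_input_ids = defaultdict(set)
        stats_key_to_reference_ids = defaultdict(set)
        tokens = TOKENIZER.split(document.lower())
        for n, index_for_n in ngram_index.items():
            for document_ngram in ngrams(tokens, n):
                for entry in index_for_n.get(document_ngram, ()):
                    target = (
                        stats_key_to_input_ids
                        if entry["part"] == "input"
                        else stats_key_to_reference_ids
                    )
                    target[entry["stats_key"]].add(entry["instance_id"])
        return stats_key_to_input_ids, stats_key_to_reference_ids


    # Hypothetical one-entry index: a single 2-gram pointing back at instance "id0".
    stats_key = hashabledict({"scenario": "mmlu:philosophy", "split": "test", "n": 2})
    ngram_index = {
        2: {
            ("free", "will"): [
                hashabledict({"stats_key": stats_key, "instance_id": "id0", "part": "input"})
            ]
        }
    }
    inputs, refs = compute_document_data_overlap("Kant wrote about free will.", ngram_index)
    print(dict(inputs))  # {hashabledict(...): {'id0'}}

Because the stats keys and index entries are plain dicts made hashable this way, they can be used directly as defaultdict/set keys during the aggregation step, which is what the notebooks rely on when merging per-job results.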