From 9b7f840f07fbadcb13d558c29ed91b820c0e7517 Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 13:50:33 +0100
Subject: [PATCH 1/7] Refactor SDFG List to CFG List

---
 dace/sdfg/sdfg.py  | 62 ++++++++++++++++------------------------------
 dace/sdfg/state.py | 52 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+), 41 deletions(-)

diff --git a/dace/sdfg/sdfg.py b/dace/sdfg/sdfg.py
index eb43a99a54..526779b1ca 100644
--- a/dace/sdfg/sdfg.py
+++ b/dace/sdfg/sdfg.py
@@ -498,7 +498,6 @@ def __init__(self,
         self.symbols = {}
         self._parent_sdfg = None
         self._parent_nsdfg_node = None
-        self._sdfg_list = [self]
         self._arrays = NestedDict()  # type: Dict[str, dt.Array]
         self.arg_names = []
         self._labels: Set[str] = set()
@@ -531,7 +530,7 @@ def __deepcopy__(self, memo):
         for k, v in self.__dict__.items():
             # Skip derivative attributes
             if k in ('_cached_start_block', '_edges', '_nodes', '_parent', '_parent_sdfg', '_parent_nsdfg_node',
-                     '_sdfg_list', '_transformation_hist'):
+                     '_cfg_list', '_transformation_hist'):
                 continue
             setattr(result, k, copy.deepcopy(v, memo))
         # Copy edges and nodes
@@ -547,12 +546,12 @@ def __deepcopy__(self, memo):
         # Copy SDFG list and transformation history
         if hasattr(self, '_transformation_hist'):
             setattr(result, '_transformation_hist', copy.deepcopy(self._transformation_hist, memo))
-        result._sdfg_list = []
+        result._cfg_list = []
         if self._parent_sdfg is None:
             # Avoid import loops
             from dace.transformation.passes.fusion_inline import FixNestedSDFGReferences
 
-            result._sdfg_list = result.reset_sdfg_list()
+            result._cfg_list = result.reset_cfg_list()
             fixed = FixNestedSDFGReferences().apply_pass(result, {})
             if fixed:
                 warnings.warn(f'Fixed {fixed} nested SDFG parent references during deep copy.')
@@ -564,8 +563,9 @@ def sdfg_id(self):
         """
         Returns the unique index of the current SDFG within the current
         tree of SDFGs (top-level SDFG is 0, nested SDFGs are greater).
+        :note: `sdfg_id` is deprecated, please use `cfg_id` instead.
         """
-        return self.sdfg_list.index(self)
+        return self.cfg_id
 
     def to_json(self, hash=False):
         """ Serializes this object to JSON format.
@@ -573,8 +573,9 @@ def to_json(self, hash=False):
             :return: A string representing the JSON-serialized SDFG.
         """
         # Location in the SDFG list (only for root SDFG)
-        if self.parent_sdfg is None:
-            self.reset_sdfg_list()
+        is_root = self.parent_sdfg is None
+        if is_root:
+            self.reset_cfg_list()
 
         tmp = super().to_json()
 
@@ -582,14 +583,11 @@ def to_json(self, hash=False):
         if 'constants_prop' in tmp['attributes']:
             tmp['attributes']['constants_prop'] = json.loads(dace.serialize.dumps(tmp['attributes']['constants_prop']))
 
-        tmp['sdfg_list_id'] = int(self.sdfg_id)
-        tmp['start_state'] = self._start_block
-
         tmp['attributes']['name'] = self.name
         if hash:
             tmp['attributes']['hash'] = self.hash_sdfg(tmp)
 
-        if int(self.sdfg_id) == 0:
+        if is_root:
             tmp['dace_version'] = dace.__version__
 
         return tmp
@@ -616,7 +614,7 @@ def from_json(cls, json_obj, context_info=None):
 
         dace.serialize.set_properties_from_json(ret,
                                                 json_obj,
-                                                ignore_properties={'constants_prop', 'name', 'hash', 'start_state'})
+                                                ignore_properties={'constants_prop', 'name', 'hash'})
 
         nodelist = []
         for n in nodes:
@@ -631,9 +629,6 @@ def from_json(cls, json_obj, context_info=None):
             e = dace.serialize.from_json(e)
             ret.add_edge(nodelist[int(e.src)], nodelist[int(e.dst)], e.data)
 
-        if 'start_state' in json_obj:
-            ret._start_block = json_obj['start_state']
-
         return ret
 
     def hash_sdfg(self, jsondict: Optional[Dict[str, Any]] = None) -> str:
@@ -650,8 +645,8 @@ def keyword_remover(json_obj: Any, last_keyword=""):
             # uniquely representing the SDFG. This, among other things, includes
             # the hash, name, transformation history, and meta attributes.
             if isinstance(json_obj, dict):
-                if 'sdfg_list_id' in json_obj:
-                    del json_obj['sdfg_list_id']
+                if 'cfg_list_id' in json_obj:
+                    del json_obj['cfg_list_id']
 
                 keys_to_delete = []
                 kv_to_recurse = []
@@ -901,8 +896,8 @@ def append_transformation(self, transformation):
         if Config.get_bool('store_history') is False:
             return
         # Make sure the transformation is appended to the root SDFG.
-        if self.sdfg_id != 0:
-            self.sdfg_list[0].append_transformation(transformation)
+        if self.cfg_id != 0:
+            self.cfg_list[0].append_transformation(transformation)
             return
 
         if not self.orig_sdfg:
@@ -1112,32 +1107,17 @@ def remove_data(self, name, validate=True):
         del self._arrays[name]
 
     def reset_sdfg_list(self):
-        if self.parent_sdfg is not None:
-            return self.parent_sdfg.reset_sdfg_list()
-        else:
-            # Propagate new SDFG list to all children
-            all_sdfgs = list(self.all_sdfgs_recursive())
-            for sd in all_sdfgs:
-                sd._sdfg_list = all_sdfgs
-        return self._sdfg_list
+        warnings.warn('reset_sdfg_list is deprecated, use reset_cfg_list instead', DeprecationWarning)
+        return self.reset_cfg_list()
 
     def update_sdfg_list(self, sdfg_list):
-        # TODO: Refactor
-        sub_sdfg_list = self._sdfg_list
-        for sdfg in sdfg_list:
-            if sdfg not in sub_sdfg_list:
-                sub_sdfg_list.append(sdfg)
-        if self._parent_sdfg is not None:
-            self._parent_sdfg.update_sdfg_list(sub_sdfg_list)
-            self._sdfg_list = self._parent_sdfg.sdfg_list
-            for sdfg in sub_sdfg_list:
-                sdfg._sdfg_list = self._sdfg_list
-        else:
-            self._sdfg_list = sub_sdfg_list
+        warnings.warn('update_sdfg_list is deprecated, use update_cfg_list instead', DeprecationWarning)
+        self.update_cfg_list(sdfg_list)
 
     @property
-    def sdfg_list(self) -> List['SDFG']:
-        return self._sdfg_list
+    def sdfg_list(self) -> List['ControlFlowRegion']:
+        warnings.warn('sdfg_list is deprecated, use cfg_list instead', DeprecationWarning)
+        return self.cfg_list
 
     def set_sourcecode(self, code: str, lang=None):
         """ Set the source code of this SDFG (for IDE purposes).
diff --git a/dace/sdfg/state.py b/dace/sdfg/state.py
index becebd1c28..fa98472f10 100644
--- a/dace/sdfg/state.py
+++ b/dace/sdfg/state.py
@@ -1137,6 +1137,10 @@ def parent_graph(self) -> 'ControlFlowRegion':
     def parent_graph(self, parent: Optional['ControlFlowRegion']):
         self._parent_graph = parent
 
+    @property
+    def block_id(self) -> int:
+        return self.parent_graph.node_id(self)
+
 
 @make_properties
 class SDFGState(OrderedMultiDiConnectorGraph[nd.Node, mm.Memlet], ControlFlowBlock, DataflowGraphView):
@@ -2373,6 +2377,38 @@ def __init__(self, label: str='', sdfg: Optional['SDFG'] = None):
         self._labels: Set[str] = set()
         self._start_block: Optional[int] = None
         self._cached_start_block: Optional[ControlFlowBlock] = None
+        self._cfg_list: List['ControlFlowRegion'] = [self]
+
+    def reset_cfg_list(self) -> List['ControlFlowRegion']:
+        if isinstance(self, dace.SDFG) and self.parent_sdfg is not None:
+            return self.parent_sdfg.reset_cfg_list()
+        elif self._parent_graph is not None:
+            return self._parent_graph.reset_cfg_list()
+        else:
+            # Propagate new CFG list to all children
+            all_cfgs = list(self.all_control_flow_regions(recursive=True))
+            for g in all_cfgs:
+                g._cfg_list = all_cfgs
+        return self._cfg_list
+
+    def update_cfg_list(self, cfg_list):
+        # TODO: Refactor
+        sub_cfg_list = self._cfg_list
+        for g in cfg_list:
+            if g not in sub_cfg_list:
+                sub_cfg_list.append(g)
+        ptarget = None
+        if isinstance(self, dace.SDFG) and self.parent_sdfg is not None:
+            ptarget = self.parent_sdfg
+        elif self._parent_graph is not None:
+            ptarget = self._parent_graph
+        if ptarget is not None:
+            ptarget.update_cfg_list(sub_cfg_list)
+            self._cfg_list = ptarget.cfg_list
+            for g in sub_cfg_list:
+                g._cfg_list = self._cfg_list
+        else:
+            self._cfg_list = sub_cfg_list
 
     def add_edge(self, src: ControlFlowBlock, dst: ControlFlowBlock, data: 'dace.sdfg.InterstateEdge'):
         """ Adds a new edge to the graph. Must be an InterstateEdge or a subclass thereof.
@@ -2523,6 +2559,10 @@ def to_json(self, parent=None):
         graph_json = OrderedDiGraph.to_json(self)
         block_json = ControlFlowBlock.to_json(self, parent)
         graph_json.update(block_json)
+
+        graph_json['cfg_list_id'] = int(self.cfg_id)
+        graph_json['start_block'] = self._start_block
+
         return graph_json
 
     ###################################################################
@@ -2574,6 +2614,18 @@ def __str__(self):
     def __repr__(self) -> str:
         return f'{self.__class__.__name__} ({self.label})'
 
+    @property
+    def cfg_list(self) -> List['ControlFlowRegion']:
+        return self._cfg_list
+
+    @property
+    def cfg_id(self) -> int:
+        """
+        Returns the unique index of the current CFG within the current tree of CFGs (Top-level CFG/SDFG is 0, nested
+        CFGs/SDFGs are greater).
+        """
+        return self.cfg_list.index(self)
+
     @property
     def start_block(self):
         """ Returns the starting block of this ControlFlowGraph. """

From bc8679f02bee569611e53483685c02805ca2095a Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 14:01:21 +0100
Subject: [PATCH 2/7] Make sure no old style `sdfg_list` calls remain

---
 dace/codegen/targets/cpu.py                   |  2 +-
 dace/frontend/fortran/fortran_parser.py       |  2 +-
 dace/sdfg/analysis/cutout.py                  | 20 +++++++++----------
 dace/sdfg/nodes.py                            |  2 +-
 dace/sdfg/state.py                            |  2 +-
 dace/transformation/auto/auto_optimize.py     |  2 +-
 dace/transformation/change_strides.py         |  2 +-
 dace/transformation/dataflow/map_unroll.py    |  4 ++--
 .../dataflow/reduce_expansion.py              |  4 ++--
 .../interstate/multistate_inline.py           |  2 +-
 .../transformation/interstate/sdfg_nesting.py |  2 +-
 dace/transformation/optimizer.py              |  8 ++++----
 .../transformation/passes/pattern_matching.py |  4 ++--
 dace/transformation/subgraph/composite.py     |  2 +-
 dace/transformation/testing.py                |  2 +-
 dace/transformation/transformation.py         |  8 ++++----
 tests/codegen/nested_kernel_transient_test.py |  4 ++--
 .../writeset_underapproximation_test.py       |  2 +-
 tests/python_frontend/augassign_wcr_test.py   |  6 +++---
 19 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/dace/codegen/targets/cpu.py b/dace/codegen/targets/cpu.py
index e2497cdb77..84d55c9910 100644
--- a/dace/codegen/targets/cpu.py
+++ b/dace/codegen/targets/cpu.py
@@ -1501,7 +1501,7 @@ def generate_nsdfg_header(self, sdfg, state, state_id, node, memlet_references,
         arguments = []
 
         if state_struct:
-            toplevel_sdfg: SDFG = sdfg.sdfg_list[0]
+            toplevel_sdfg: SDFG = sdfg.cfg_list[0]
             arguments.append(f'{cpp.mangle_dace_state_struct_name(toplevel_sdfg)} *__state')
 
         # Add "__restrict__" keywords to arguments that do not alias with others in the context of this SDFG
diff --git a/dace/frontend/fortran/fortran_parser.py b/dace/frontend/fortran/fortran_parser.py
index 21f61a171a..6870b29b07 100644
--- a/dace/frontend/fortran/fortran_parser.py
+++ b/dace/frontend/fortran/fortran_parser.py
@@ -1106,7 +1106,7 @@ def create_sdfg_from_string(
     sdfg.parent = None
     sdfg.parent_sdfg = None
     sdfg.parent_nsdfg_node = None
-    sdfg.reset_sdfg_list()
+    sdfg.reset_cfg_list()
     return sdfg
 
 
diff --git a/dace/sdfg/analysis/cutout.py b/dace/sdfg/analysis/cutout.py
index a72a6d7e54..94c86bb99c 100644
--- a/dace/sdfg/analysis/cutout.py
+++ b/dace/sdfg/analysis/cutout.py
@@ -82,7 +82,7 @@ def translate_transformation_into(self, transformation: Union[PatternTransformat
                     pass
         elif isinstance(transformation, MultiStateTransformation):
             new_sdfg_id = self._in_translation[transformation.sdfg_id]
-            new_sdfg = self.sdfg_list[new_sdfg_id]
+            new_sdfg = self.cfg_list[new_sdfg_id]
             transformation._sdfg = new_sdfg
             transformation.sdfg_id = new_sdfg_id
             for k in transformation.subgraph.keys():
@@ -140,8 +140,8 @@ def from_transformation(
             return cut_sdfg
 
         target_sdfg = sdfg
-        if transformation.sdfg_id >= 0 and target_sdfg.sdfg_list is not None:
-            target_sdfg = target_sdfg.sdfg_list[transformation.sdfg_id]
+        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list is not None:
+            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
 
         if (all(isinstance(n, nd.Node) for n in affected_nodes) or
             isinstance(transformation, (SubgraphTransformation, SingleStateTransformation))):
@@ -308,7 +308,7 @@ def singlestate_cutout(cls,
         cutout._out_translation = out_translation
 
         # Translate in nested SDFG nodes and their SDFGs (their list id, specifically).
-        cutout.reset_sdfg_list()
+        cutout.reset_cfg_list()
         outers = set(in_translation.keys())
         for outer in outers:
             if isinstance(outer, nd.NestedSDFG):
@@ -467,7 +467,7 @@ def multistate_cutout(cls,
         cutout._in_translation = in_translation
         cutout._out_translation = out_translation
 
-        cutout.reset_sdfg_list()
+        cutout.reset_cfg_list()
         _recursively_set_nsdfg_parents(cutout)
 
         return cutout
@@ -495,8 +495,8 @@ def _transformation_determine_affected_nodes(
     affected_nodes = set()
 
     if isinstance(transformation, PatternTransformation):
-        if transformation.sdfg_id >= 0 and target_sdfg.sdfg_list:
-            target_sdfg = target_sdfg.sdfg_list[transformation.sdfg_id]
+        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list:
+            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
 
         for k, _ in transformation._get_pattern_nodes().items():
             try:
@@ -526,8 +526,8 @@ def _transformation_determine_affected_nodes(
             # This is a transformation that affects a nested SDFG node, grab that NSDFG node.
             affected_nodes.add(target_sdfg.parent_nsdfg_node)
     else:
-        if transformation.sdfg_id >= 0 and target_sdfg.sdfg_list:
-            target_sdfg = target_sdfg.sdfg_list[transformation.sdfg_id]
+        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list:
+            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
 
         subgraph = transformation.get_subgraph(target_sdfg)
         for n in subgraph.nodes():
@@ -901,7 +901,7 @@ def _determine_cutout_reachability(
     """
     if state_reach is None:
         original_sdfg_id = out_translation[ct.sdfg_id]
-        state_reachability_dict = StateReachability().apply_pass(sdfg.sdfg_list[original_sdfg_id], None)
+        state_reachability_dict = StateReachability().apply_pass(sdfg.cfg_list[original_sdfg_id], None)
         state_reach = state_reachability_dict[original_sdfg_id]
     inverse_cutout_reach: Set[SDFGState] = set()
     cutout_reach: Set[SDFGState] = set()
diff --git a/dace/sdfg/nodes.py b/dace/sdfg/nodes.py
index a21974a899..b1a95b6e32 100644
--- a/dace/sdfg/nodes.py
+++ b/dace/sdfg/nodes.py
@@ -585,7 +585,7 @@ def from_json(json_obj, context=None):
 
         ret.sdfg.parent_nsdfg_node = ret
 
-        ret.sdfg.update_sdfg_list([])
+        ret.sdfg.update_cfg_list([])
 
         return ret
 
diff --git a/dace/sdfg/state.py b/dace/sdfg/state.py
index fa98472f10..f2b5bc2589 100644
--- a/dace/sdfg/state.py
+++ b/dace/sdfg/state.py
@@ -1544,7 +1544,7 @@ def add_nested_sdfg(
         sdfg.parent = self
         sdfg.parent_sdfg = self.sdfg
 
-        sdfg.update_sdfg_list([])
+        sdfg.update_cfg_list([])
 
         # Make dictionary of autodetect connector types from set
         if isinstance(inputs, (set, collections.abc.KeysView)):
diff --git a/dace/transformation/auto/auto_optimize.py b/dace/transformation/auto/auto_optimize.py
index bb384cfd9a..08d62048b5 100644
--- a/dace/transformation/auto/auto_optimize.py
+++ b/dace/transformation/auto/auto_optimize.py
@@ -570,7 +570,7 @@ def auto_optimize(sdfg: SDFG,
     sdfg.apply_transformations_repeated(TrivialMapElimination, validate=validate, validate_all=validate_all)
     while transformed:
         sdfg.simplify(validate=False, validate_all=validate_all)
-        for s in sdfg.sdfg_list:
+        for s in sdfg.cfg_list:
             xfh.split_interstate_edges(s)
         l2ms = sdfg.apply_transformations_repeated((LoopToMap, RefineNestedAccess),
                                                    validate=False,
diff --git a/dace/transformation/change_strides.py b/dace/transformation/change_strides.py
index 001cd4aa63..1bff95b3d1 100644
--- a/dace/transformation/change_strides.py
+++ b/dace/transformation/change_strides.py
@@ -101,7 +101,7 @@ def change_strides(
     # Map of array names in the nested sdfg:  key: array name in parent sdfg (this sdfg), value: name in the nsdfg
     # Assumes that name changes only appear in the first level of nsdfg nesting
     array_names_map = {}
-    for graph in sdfg.sdfg_list:
+    for graph in sdfg.cfg_list:
         if graph.parent_nsdfg_node is not None:
             if graph.parent_sdfg == sdfg:
                 for connector in graph.parent_nsdfg_node.in_connectors:
diff --git a/dace/transformation/dataflow/map_unroll.py b/dace/transformation/dataflow/map_unroll.py
index 858900e2a8..60ef419932 100644
--- a/dace/transformation/dataflow/map_unroll.py
+++ b/dace/transformation/dataflow/map_unroll.py
@@ -91,7 +91,7 @@ def apply(self, state: SDFGState, sdfg: SDFG):
                     # Set all the references
                     unrolled_nsdfg.parent = state
                     unrolled_nsdfg.parent_sdfg = sdfg
-                    unrolled_nsdfg.update_sdfg_list([])
+                    unrolled_nsdfg.update_cfg_list([])
                     unrolled_node.sdfg = unrolled_nsdfg
                     unrolled_nsdfg.parent_nsdfg_node = unrolled_node
                 else:
@@ -130,7 +130,7 @@ def apply(self, state: SDFGState, sdfg: SDFG):
 
         # If we added a bunch of new nested SDFGs, reset the internal list
         if len(nested_sdfgs) > 0:
-            sdfg.reset_sdfg_list()
+            sdfg.reset_cfg_list()
 
         # Remove local memories that were replicated
         for mem in local_memories:
diff --git a/dace/transformation/dataflow/reduce_expansion.py b/dace/transformation/dataflow/reduce_expansion.py
index 5a108ccb7a..dd93e42654 100644
--- a/dace/transformation/dataflow/reduce_expansion.py
+++ b/dace/transformation/dataflow/reduce_expansion.py
@@ -183,7 +183,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_a: nsdfg.sdfg.nodes()[0].nodes().index(inner_exit),
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(outer_exit)
             }
-            nsdfg_id = nsdfg.sdfg.sdfg_list.index(nsdfg.sdfg)
+            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
             nstate_id = 0
             local_storage = OutLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
@@ -215,7 +215,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(inner_entry)
             }
 
-            nsdfg_id = nsdfg.sdfg.sdfg_list.index(nsdfg.sdfg)
+            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
             nstate_id = 0
             local_storage = InLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
diff --git a/dace/transformation/interstate/multistate_inline.py b/dace/transformation/interstate/multistate_inline.py
index 4d560ab70a..8623bdf468 100644
--- a/dace/transformation/interstate/multistate_inline.py
+++ b/dace/transformation/interstate/multistate_inline.py
@@ -420,7 +420,7 @@ def apply(self, outer_state: SDFGState, sdfg: SDFG):
         # Remove nested SDFG and state
         sdfg.remove_node(outer_state)
 
-        sdfg._sdfg_list = sdfg.reset_sdfg_list()
+        sdfg._cfg_list = sdfg.reset_cfg_list()
 
         return nsdfg.nodes()
 
diff --git a/dace/transformation/interstate/sdfg_nesting.py b/dace/transformation/interstate/sdfg_nesting.py
index fc3ebfbdca..2e4ebc31da 100644
--- a/dace/transformation/interstate/sdfg_nesting.py
+++ b/dace/transformation/interstate/sdfg_nesting.py
@@ -591,7 +591,7 @@ def apply(self, state: SDFGState, sdfg: SDFG):
             if state.degree(dnode) == 0 and dnode not in isolated_nodes:
                 state.remove_node(dnode)
 
-        sdfg._sdfg_list = sdfg.reset_sdfg_list()
+        sdfg._cfg_list = sdfg.reset_cfg_list()
 
     def _modify_access_to_access(self,
                                  input_edges: Dict[nodes.Node, MultiConnectorEdge],
diff --git a/dace/transformation/optimizer.py b/dace/transformation/optimizer.py
index 87e920b2eb..4cb4997ef4 100644
--- a/dace/transformation/optimizer.py
+++ b/dace/transformation/optimizer.py
@@ -102,11 +102,11 @@ def get_actions(actions, graph, match):
             return actions
 
         def get_dataflow_actions(actions, sdfg, match):
-            graph = sdfg.sdfg_list[match.sdfg_id].nodes()[match.state_id]
+            graph = sdfg.cfg_list[match.sdfg_id].nodes()[match.state_id]
             return get_actions(actions, graph, match)
 
         def get_stateflow_actions(actions, sdfg, match):
-            graph = sdfg.sdfg_list[match.sdfg_id]
+            graph = sdfg.cfg_list[match.sdfg_id]
             return get_actions(actions, graph, match)
 
         actions = dict()
@@ -207,7 +207,7 @@ def optimize(self):
             ui_options = sorted(self.get_pattern_matches())
             ui_options_idx = 0
             for pattern_match in ui_options:
-                sdfg = self.sdfg.sdfg_list[pattern_match.sdfg_id]
+                sdfg = self.sdfg.cfg_list[pattern_match.sdfg_id]
                 pattern_match._sdfg = sdfg
                 print('%d. Transformation %s' % (ui_options_idx, pattern_match.print_match(sdfg)))
                 ui_options_idx += 1
@@ -238,7 +238,7 @@ def optimize(self):
                 break
 
             match_id = (str(occurrence) if pattern_name is None else '%s$%d' % (pattern_name, occurrence))
-            sdfg = self.sdfg.sdfg_list[pattern_match.sdfg_id]
+            sdfg = self.sdfg.cfg_list[pattern_match.sdfg_id]
             graph = sdfg.node(pattern_match.state_id) if pattern_match.state_id >= 0 else sdfg
             pattern_match._sdfg = sdfg
             print('You selected (%s) pattern %s with parameters %s' %
diff --git a/dace/transformation/passes/pattern_matching.py b/dace/transformation/passes/pattern_matching.py
index 2bbea14915..3f4d51dd9d 100644
--- a/dace/transformation/passes/pattern_matching.py
+++ b/dace/transformation/passes/pattern_matching.py
@@ -103,7 +103,7 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[str,
             except StopIteration:
                 continue
 
-            tsdfg = sdfg.sdfg_list[match.sdfg_id]
+            tsdfg = sdfg.cfg_list[match.sdfg_id]
             graph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
 
             # Set previous pipeline results
@@ -156,7 +156,7 @@ def __init__(self,
     # Helper function for applying and validating a transformation
     def _apply_and_validate(self, match: xf.PatternTransformation, sdfg: SDFG, start: float,
                             pipeline_results: Dict[str, Any], applied_transformations: Dict[str, Any]):
-        tsdfg = sdfg.sdfg_list[match.sdfg_id]
+        tsdfg = sdfg.cfg_list[match.sdfg_id]
         graph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
 
         # Set previous pipeline results
diff --git a/dace/transformation/subgraph/composite.py b/dace/transformation/subgraph/composite.py
index fd1824f4a0..ba71b786f8 100644
--- a/dace/transformation/subgraph/composite.py
+++ b/dace/transformation/subgraph/composite.py
@@ -64,7 +64,7 @@ def can_be_applied(self, sdfg: SDFG, subgraph: SubgraphView) -> bool:
                 # deepcopy
                 graph_indices = [i for (i, n) in enumerate(graph.nodes()) if n in subgraph]
                 sdfg_copy = copy.deepcopy(sdfg)
-                sdfg_copy.reset_sdfg_list()
+                sdfg_copy.reset_cfg_list()
                 graph_copy = sdfg_copy.nodes()[sdfg.nodes().index(graph)]
                 subgraph_copy = SubgraphView(graph_copy, [graph_copy.nodes()[i] for i in graph_indices])
                 expansion.sdfg_id = sdfg_copy.sdfg_id
diff --git a/dace/transformation/testing.py b/dace/transformation/testing.py
index 29bb0b8e01..00fcf84426 100644
--- a/dace/transformation/testing.py
+++ b/dace/transformation/testing.py
@@ -68,7 +68,7 @@ def _optimize_recursive(self, sdfg: SDFG, depth: int):
 
                 print('    ' * depth, type(match).__name__, '- ', end='', file=self.stdout)
 
-                tsdfg: SDFG = new_sdfg.sdfg_list[match.sdfg_id]
+                tsdfg: SDFG = new_sdfg.cfg_list[match.sdfg_id]
                 tgraph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
                 match._sdfg = tsdfg
                 match.apply(tgraph, tsdfg)
diff --git a/dace/transformation/transformation.py b/dace/transformation/transformation.py
index b4cbccdac3..7ad84e8f4d 100644
--- a/dace/transformation/transformation.py
+++ b/dace/transformation/transformation.py
@@ -224,7 +224,7 @@ def apply_pattern(self, append: bool = True, annotate: bool = True) -> Union[Any
         """
         if append:
             self._sdfg.append_transformation(self)
-        tsdfg: SDFG = self._sdfg.sdfg_list[self.sdfg_id]
+        tsdfg: SDFG = self._sdfg.cfg_list[self.sdfg_id]
         tgraph = tsdfg.node(self.state_id) if self.state_id >= 0 else tsdfg
         retval = self.apply(tgraph, tsdfg)
         if annotate and not self.annotates_memlets():
@@ -616,7 +616,7 @@ def apply(self, state, sdfg, *args, **kwargs):
                 nsdfg = expansion.sdfg
                 nsdfg.parent = state
                 nsdfg.parent_sdfg = sdfg
-                nsdfg.update_sdfg_list([])
+                nsdfg.update_cfg_list([])
                 nsdfg.parent_nsdfg_node = expansion
 
                 # Update schedule to match library node schedule
@@ -723,7 +723,7 @@ def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], sdfg_id: int =
             self.state_id = state_id
 
     def get_subgraph(self, sdfg: SDFG) -> gr.SubgraphView:
-        sdfg = sdfg.sdfg_list[self.sdfg_id]
+        sdfg = sdfg.cfg_list[self.sdfg_id]
         if self.state_id == -1:
             return gr.SubgraphView(sdfg, list(map(sdfg.node, self.subgraph)))
         state = sdfg.node(self.state_id)
@@ -748,7 +748,7 @@ def subclasses_recursive(cls) -> Set[Type['PatternTransformation']]:
         return result
 
     def subgraph_view(self, sdfg: SDFG) -> gr.SubgraphView:
-        graph = sdfg.sdfg_list[self.sdfg_id]
+        graph = sdfg.cfg_list[self.sdfg_id]
         if self.state_id != -1:
             graph = graph.node(self.state_id)
         return gr.SubgraphView(graph, [graph.node(idx) for idx in self.subgraph])
diff --git a/tests/codegen/nested_kernel_transient_test.py b/tests/codegen/nested_kernel_transient_test.py
index d9af60c5fc..b37f5ab083 100644
--- a/tests/codegen/nested_kernel_transient_test.py
+++ b/tests/codegen/nested_kernel_transient_test.py
@@ -48,7 +48,7 @@ def transient(A: dace.float64[128, 64]):
     sdfg.apply_gpu_transformations()
 
     if persistent:
-        sdfg.sdfg_list[-1].arrays['gpu_A'].lifetime = dace.AllocationLifetime.Persistent
+        sdfg.cfg_list[-1].arrays['gpu_A'].lifetime = dace.AllocationLifetime.Persistent
 
     a = np.random.rand(128, 64)
     expected = np.copy(a)
@@ -84,7 +84,7 @@ def transient(A: dace.float64[128, 64]):
     sdfg.apply_gpu_transformations()
 
     if persistent:
-        sdfg.sdfg_list[-1].arrays['gpu_A'].lifetime = dace.AllocationLifetime.Persistent
+        sdfg.cfg_list[-1].arrays['gpu_A'].lifetime = dace.AllocationLifetime.Persistent
 
     a = np.random.rand(128, 64)
     expected = np.copy(a)
diff --git a/tests/passes/writeset_underapproximation_test.py b/tests/passes/writeset_underapproximation_test.py
index a696c5ba24..d0c04c9d0b 100644
--- a/tests/passes/writeset_underapproximation_test.py
+++ b/tests/passes/writeset_underapproximation_test.py
@@ -329,7 +329,7 @@ def loop(A: dace.float64[N, M]):
 
     results = pipeline.apply_pass(sdfg, {})[UnderapproximateWrites.__name__]
 
-    nsdfg = sdfg.sdfg_list[1].parent_nsdfg_node
+    nsdfg = sdfg.cfg_list[1].parent_nsdfg_node
     map_state = sdfg.states()[0]
     result = results["approximation"]
     edge = map_state.out_edges(nsdfg)[0]
diff --git a/tests/python_frontend/augassign_wcr_test.py b/tests/python_frontend/augassign_wcr_test.py
index d460f7d0e7..2294b582ac 100644
--- a/tests/python_frontend/augassign_wcr_test.py
+++ b/tests/python_frontend/augassign_wcr_test.py
@@ -59,7 +59,7 @@ def test_augassign_wcr():
     with dace.config.set_temporary('frontend', 'avoid_wcr', value=True):
         test_sdfg = augassign_wcr.to_sdfg(simplify=False)
     wcr_count = 0
-    for sdfg in test_sdfg.sdfg_list:
+    for sdfg in test_sdfg.cfg_list:
         for state in sdfg.nodes():
             for edge in state.edges():
                 if edge.data.wcr:
@@ -80,7 +80,7 @@ def test_augassign_wcr2():
     with dace.config.set_temporary('frontend', 'avoid_wcr', value=True):
         test_sdfg = augassign_wcr2.to_sdfg(simplify=False)
     wcr_count = 0
-    for sdfg in test_sdfg.sdfg_list:
+    for sdfg in test_sdfg.cfg_list:
         for state in sdfg.nodes():
             for edge in state.edges():
                 if edge.data.wcr:
@@ -104,7 +104,7 @@ def test_augassign_wcr3():
     with dace.config.set_temporary('frontend', 'avoid_wcr', value=True):
         test_sdfg = augassign_wcr3.to_sdfg(simplify=False)
     wcr_count = 0
-    for sdfg in test_sdfg.sdfg_list:
+    for sdfg in test_sdfg.cfg_list:
         for state in sdfg.nodes():
             for edge in state.edges():
                 if edge.data.wcr:

From 68a6b621ff9e3dc880704178517599855c08c50c Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 14:13:59 +0100
Subject: [PATCH 3/7] Fix deserialization for control flow regions

---
 dace/sdfg/sdfg.py  |  3 +++
 dace/sdfg/state.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/dace/sdfg/sdfg.py b/dace/sdfg/sdfg.py
index 526779b1ca..484bab8116 100644
--- a/dace/sdfg/sdfg.py
+++ b/dace/sdfg/sdfg.py
@@ -629,6 +629,9 @@ def from_json(cls, json_obj, context_info=None):
             e = dace.serialize.from_json(e)
             ret.add_edge(nodelist[int(e.src)], nodelist[int(e.dst)], e.data)
 
+        if 'start_block' in json_obj:
+            ret._start_block = json_obj['start_block']
+
         return ret
 
     def hash_sdfg(self, jsondict: Optional[Dict[str, Any]] = None) -> str:
diff --git a/dace/sdfg/state.py b/dace/sdfg/state.py
index f2b5bc2589..2e828f4696 100644
--- a/dace/sdfg/state.py
+++ b/dace/sdfg/state.py
@@ -11,6 +11,7 @@
 from typing import TYPE_CHECKING, Any, AnyStr, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union, overload
 
 import dace
+import dace.serialize
 from dace import data as dt
 from dace import dtypes
 from dace import memlet as mm
@@ -2565,6 +2566,36 @@ def to_json(self, parent=None):
 
         return graph_json
 
+    @classmethod
+    def from_json(cls, json_obj, context_info=None):
+        context_info = context_info or {'sdfg': None, 'parent_graph': None}
+        _type = json_obj['type']
+        if _type != cls.__name__:
+            raise TypeError("Class type mismatch")
+
+        attrs = json_obj['attributes']
+        nodes = json_obj['nodes']
+        edges = json_obj['edges']
+
+        ret = ControlFlowRegion(label=attrs['label'], sdfg=context_info['sdfg'])
+
+        dace.serialize.set_properties_from_json(ret, json_obj)
+
+        nodelist = []
+        for n in nodes:
+            nci = copy.copy(context_info)
+            nci['parent_graph'] = ret
+
+            state = SDFGState.from_json(n, context=nci)
+            ret.add_node(state)
+            nodelist.append(state)
+
+        for e in edges:
+            e = dace.serialize.from_json(e)
+            ret.add_edge(nodelist[int(e.src)], nodelist[int(e.dst)], e.data)
+
+        return ret
+
     ###################################################################
     # Traversal methods
 

From 40cd861d9e3114d6bdf56884e9d5019ae38c9aa6 Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 14:17:15 +0100
Subject: [PATCH 4/7] Fix deserialization

---
 dace/sdfg/state.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/dace/sdfg/state.py b/dace/sdfg/state.py
index 2e828f4696..337d2729d8 100644
--- a/dace/sdfg/state.py
+++ b/dace/sdfg/state.py
@@ -2594,6 +2594,9 @@ def from_json(cls, json_obj, context_info=None):
             e = dace.serialize.from_json(e)
             ret.add_edge(nodelist[int(e.src)], nodelist[int(e.dst)], e.data)
 
+        if 'start_block' in json_obj:
+            ret._start_block = json_obj['start_block']
+
         return ret
 
     ###################################################################

From 482c30f4f47c6724216148dd85f35332ed60ec79 Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 14:38:40 +0100
Subject: [PATCH 5/7] Remove legacy calls to sdfg_list

---
 dace/codegen/control_flow.py                  | 12 ++---
 .../codegen/instrumentation/data/data_dump.py |  4 +-
 dace/codegen/instrumentation/gpu_events.py    |  4 +-
 dace/codegen/instrumentation/likwid.py        | 52 +++++++++----------
 dace/codegen/instrumentation/provider.py      |  2 +-
 dace/codegen/instrumentation/report.py        |  8 +--
 dace/codegen/instrumentation/timer.py         |  4 +-
 dace/codegen/prettycode.py                    |  2 +-
 dace/codegen/targets/cpp.py                   | 10 ++--
 dace/codegen/targets/cpu.py                   | 10 ++--
 dace/codegen/targets/cuda.py                  |  8 +--
 dace/codegen/targets/fpga.py                  | 14 ++---
 dace/codegen/targets/framecode.py             | 44 ++++++++--------
 dace/codegen/targets/intel_fpga.py            |  8 +--
 dace/codegen/targets/mlir/mlir.py             |  2 +-
 dace/codegen/targets/rtl.py                   |  2 +-
 dace/codegen/targets/snitch.py                |  4 +-
 dace/codegen/targets/xilinx.py                |  8 +--
 dace/libraries/standard/nodes/reduce.py       |  6 +--
 .../on_the_fly_map_fusion_tuner.py            |  8 +--
 dace/optimization/subgraph_fusion_tuner.py    |  8 +--
 dace/runtime/include/dace/perf/reporting.h    | 18 +++----
 dace/sdfg/analysis/cutout.py                  | 38 +++++++-------
 .../analysis/schedule_tree/sdfg_to_tree.py    |  2 +-
 dace/sdfg/nodes.py                            |  4 +-
 dace/sdfg/propagation.py                      |  4 +-
 dace/sdfg/utils.py                            | 10 ++--
 dace/sdfg/validation.py                       |  8 +--
 dace/sdfg/work_depth_analysis/helpers.py      | 10 ++--
 dace/sourcemap.py                             | 40 +++++++-------
 dace/transformation/auto/auto_optimize.py     |  2 +-
 .../dataflow/double_buffering.py              |  4 +-
 dace/transformation/dataflow/mapreduce.py     |  4 +-
 dace/transformation/dataflow/mpi.py           | 12 ++---
 .../dataflow/reduce_expansion.py              |  6 +--
 dace/transformation/dataflow/tiling.py        |  6 +--
 .../interstate/fpga_transform_sdfg.py         |  8 +--
 dace/transformation/optimizer.py              |  8 +--
 dace/transformation/passes/analysis.py        | 28 +++++-----
 .../passes/array_elimination.py               | 10 ++--
 .../passes/constant_propagation.py            |  4 +-
 .../passes/dead_dataflow_elimination.py       |  4 +-
 dace/transformation/passes/optional_arrays.py |  8 +--
 .../transformation/passes/pattern_matching.py |  6 +--
 dace/transformation/passes/prune_symbols.py   |  2 +-
 .../passes/reference_reduction.py             |  6 +--
 dace/transformation/passes/scalar_fission.py  |  2 +-
 dace/transformation/passes/simplify.py        |  2 +-
 dace/transformation/passes/symbol_ssa.py      |  2 +-
 dace/transformation/subgraph/composite.py     | 10 ++--
 .../transformation/subgraph/stencil_tiling.py | 10 ++--
 dace/transformation/testing.py                |  2 +-
 dace/transformation/transformation.py         | 38 +++++++-------
 samples/instrumentation/matmul_likwid.py      |  2 +-
 tests/codegen/allocation_lifetime_test.py     |  6 +--
 tests/parse_state_struct_test.py              |  2 +-
 .../block_allreduce_cudatest.py               |  4 +-
 .../subgraph_fusion/reduction_test.py         |  2 +-
 tests/transformations/subgraph_fusion/util.py |  4 +-
 59 files changed, 279 insertions(+), 279 deletions(-)

diff --git a/dace/codegen/control_flow.py b/dace/codegen/control_flow.py
index a198ed371b..2460816793 100644
--- a/dace/codegen/control_flow.py
+++ b/dace/codegen/control_flow.py
@@ -126,7 +126,7 @@ class SingleState(ControlFlow):
     def as_cpp(self, codegen, symbols) -> str:
         sdfg = self.state.parent
 
-        expr = '__state_{}_{}:;\n'.format(sdfg.sdfg_id, self.state.label)
+        expr = '__state_{}_{}:;\n'.format(sdfg.cfg_id, self.state.label)
         if self.state.number_of_nodes() > 0:
             expr += '{\n'
             expr += self.dispatch_state(self.state)
@@ -138,7 +138,7 @@ def as_cpp(self, codegen, symbols) -> str:
 
         # If any state has no children, it should jump to the end of the SDFG
         if not self.last_state and sdfg.out_degree(self.state) == 0:
-            expr += 'goto __state_exit_{};\n'.format(sdfg.sdfg_id)
+            expr += 'goto __state_exit_{};\n'.format(sdfg.cfg_id)
         return expr
 
     def generate_transition(self,
@@ -175,7 +175,7 @@ def generate_transition(self,
 
         if (not edge.data.is_unconditional()
                 or ((successor is None or edge.dst is not successor) and not assignments_only)):
-            expr += 'goto __state_{}_{};\n'.format(sdfg.sdfg_id, edge.dst.label)
+            expr += 'goto __state_{}_{};\n'.format(sdfg.cfg_id, edge.dst.label)
 
         if not edge.data.is_unconditional() and not assignments_only:
             expr += '}\n'
@@ -257,7 +257,7 @@ def as_cpp(self, codegen, symbols) -> str:
                 # One unconditional edge
                 if (len(out_edges) == 1 and out_edges[0].data.is_unconditional()):
                     continue
-                expr += f'goto __state_exit_{sdfg.sdfg_id};\n'
+                expr += f'goto __state_exit_{sdfg.cfg_id};\n'
 
         return expr
 
@@ -326,7 +326,7 @@ def as_cpp(self, codegen, symbols) -> str:
         # execution should end, so we emit an "else goto exit" here.
         if len(self.body) > 0:
             expr += ' else {\n'
-        expr += 'goto __state_exit_{};\n'.format(self.sdfg.sdfg_id)
+        expr += 'goto __state_exit_{};\n'.format(self.sdfg.cfg_id)
         if len(self.body) > 0:
             expr += '\n}'
         return expr
@@ -475,7 +475,7 @@ def as_cpp(self, codegen, symbols) -> str:
             expr += f'case {case}: {{\n'
             expr += body.as_cpp(codegen, symbols)
             expr += 'break;\n}\n'
-        expr += f'default: goto __state_exit_{self.sdfg.sdfg_id};'
+        expr += f'default: goto __state_exit_{self.sdfg.cfg_id};'
         expr += '\n}\n'
         return expr
 
diff --git a/dace/codegen/instrumentation/data/data_dump.py b/dace/codegen/instrumentation/data/data_dump.py
index 2217524d19..5fc487f94d 100644
--- a/dace/codegen/instrumentation/data/data_dump.py
+++ b/dace/codegen/instrumentation/data/data_dump.py
@@ -161,7 +161,7 @@ def on_node_end(self, sdfg: SDFG, state: SDFGState, node: nodes.AccessNode, oute
         # Create UUID
         state_id = sdfg.node_id(state)
         node_id = state.node_id(node)
-        uuid = f'{sdfg.sdfg_id}_{state_id}_{node_id}'
+        uuid = f'{sdfg.cfg_id}_{state_id}_{node_id}'
 
         # Get optional pre/postamble for instrumenting device data
         preamble, postamble = '', ''
@@ -277,7 +277,7 @@ def on_node_begin(self, sdfg: SDFG, state: SDFGState, node: nodes.AccessNode, ou
         # Create UUID
         state_id = sdfg.node_id(state)
         node_id = state.node_id(node)
-        uuid = f'{sdfg.sdfg_id}_{state_id}_{node_id}'
+        uuid = f'{sdfg.cfg_id}_{state_id}_{node_id}'
 
         # Get optional pre/postamble for instrumenting device data
         preamble, postamble = '', ''
diff --git a/dace/codegen/instrumentation/gpu_events.py b/dace/codegen/instrumentation/gpu_events.py
index 04dec2632c..d6fc21f305 100644
--- a/dace/codegen/instrumentation/gpu_events.py
+++ b/dace/codegen/instrumentation/gpu_events.py
@@ -65,11 +65,11 @@ def _report(self, timer_name: str, sdfg=None, state=None, node=None):
 int __dace_micros_{id} = (int) (__dace_ms_{id} * 1000.0);
 unsigned long int __dace_ts_end_{id} = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count();
 unsigned long int __dace_ts_start_{id} = __dace_ts_end_{id} - __dace_micros_{id};
-__state->report.add_completion("{timer_name}", "GPU", __dace_ts_start_{id}, __dace_ts_end_{id}, {sdfg_id}, {state_id}, {node_id});'''.format(
+__state->report.add_completion("{timer_name}", "GPU", __dace_ts_start_{id}, __dace_ts_end_{id}, {cfg_id}, {state_id}, {node_id});'''.format(
             id=idstr,
             timer_name=timer_name,
             backend=self.backend,
-            sdfg_id=sdfg.sdfg_id,
+            cfg_id=sdfg.cfg_id,
             state_id=state_id,
             node_id=node_id)
 
diff --git a/dace/codegen/instrumentation/likwid.py b/dace/codegen/instrumentation/likwid.py
index e4f9c3154e..efbd6da934 100644
--- a/dace/codegen/instrumentation/likwid.py
+++ b/dace/codegen/instrumentation/likwid.py
@@ -169,7 +169,7 @@ def on_sdfg_end(self, sdfg, local_stream, global_stream):
 '''
         local_stream.write(outer_code, sdfg)
 
-        for region, sdfg_id, state_id, node_id in self._regions:
+        for region, cfg_id, state_id, node_id in self._regions:
             report_code = f'''
 #pragma omp parallel
 {{
@@ -187,7 +187,7 @@ def on_sdfg_end(self, sdfg, local_stream, global_stream):
 
         for (int t = 0; t < num_threads; t++)
         {{
-            __state->report.add_completion("Timer", "likwid", 0, time[t] * 1000 * 1000, t, {sdfg_id}, {state_id}, {node_id});
+            __state->report.add_completion("Timer", "likwid", 0, time[t] * 1000 * 1000, t, {cfg_id}, {state_id}, {node_id});
         }}
 
         for (int i = 0; i < nevents; i++)
@@ -196,7 +196,7 @@ def on_sdfg_end(self, sdfg, local_stream, global_stream):
             
             for (int t = 0; t < num_threads; t++)
             {{
-                __state->report.add_counter("{region}", "likwid", event_name, events[t][i], t, {sdfg_id}, {state_id}, {node_id});
+                __state->report.add_counter("{region}", "likwid", event_name, events[t][i], t, {cfg_id}, {state_id}, {node_id});
             }}
         }}
     }}
@@ -214,11 +214,11 @@ def on_state_begin(self, sdfg, state, local_stream, global_stream):
             return
 
         if state.instrument == dace.InstrumentationType.LIKWID_CPU:
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             state_id = sdfg.node_id(state)
             node_id = -1
-            region = f"state_{sdfg_id}_{state_id}_{node_id}"
-            self._regions.append((region, sdfg_id, state_id, node_id))
+            region = f"state_{cfg_id}_{state_id}_{node_id}"
+            self._regions.append((region, cfg_id, state_id, node_id))
 
             marker_code = f'''
 #pragma omp parallel
@@ -250,10 +250,10 @@ def on_state_end(self, sdfg, state, local_stream, global_stream):
             return
 
         if state.instrument == dace.InstrumentationType.LIKWID_CPU:
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             state_id = sdfg.node_id(state)
             node_id = -1
-            region = f"state_{sdfg_id}_{state_id}_{node_id}"
+            region = f"state_{cfg_id}_{state_id}_{node_id}"
 
             marker_code = f'''
 #pragma omp parallel
@@ -272,12 +272,12 @@ def on_scope_entry(self, sdfg, state, node, outer_stream, inner_stream, global_s
         elif node.schedule not in LIKWIDInstrumentationCPU.perf_whitelist_schedules:
             raise TypeError("Unsupported schedule on scope")
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         state_id = sdfg.node_id(state)
         node_id = state.node_id(node)
-        region = f"scope_{sdfg_id}_{state_id}_{node_id}"
+        region = f"scope_{cfg_id}_{state_id}_{node_id}"
 
-        self._regions.append((region, sdfg_id, state_id, node_id))
+        self._regions.append((region, cfg_id, state_id, node_id))
         marker_code = f'''
 #pragma omp parallel
 {{
@@ -294,10 +294,10 @@ def on_scope_exit(self, sdfg, state, node, outer_stream, inner_stream, global_st
         if not self._likwid_used or entry_node.instrument != dace.InstrumentationType.LIKWID_CPU:
             return
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         state_id = sdfg.node_id(state)
         node_id = state.node_id(entry_node)
-        region = f"scope_{sdfg_id}_{state_id}_{node_id}"
+        region = f"scope_{cfg_id}_{state_id}_{node_id}"
 
         marker_code = f'''
 #pragma omp parallel
@@ -366,7 +366,7 @@ def on_sdfg_end(self, sdfg, local_stream, global_stream):
         if not self._likwid_used or sdfg.parent is not None:
             return
 
-        for region, sdfg_id, state_id, node_id in self._regions:
+        for region, cfg_id, state_id, node_id in self._regions:
             report_code = f'''
 {{
     double *events = (double*) malloc(MAX_NUM_EVENTS * sizeof(double));
@@ -377,14 +377,14 @@ def on_sdfg_end(self, sdfg, local_stream, global_stream):
 
     LIKWID_NVMARKER_GET("{region}", &ngpus, &nevents, &events, &time, &count);
 
-    __state->report.add_completion("Timer", "likwid_gpu", 0, time * 1000 * 1000, 0, {sdfg_id}, {state_id}, {node_id});
+    __state->report.add_completion("Timer", "likwid_gpu", 0, time * 1000 * 1000, 0, {cfg_id}, {state_id}, {node_id});
     
     int gid = nvmon_getIdOfActiveGroup();
     for (int i = 0; i < nevents; i++)
     {{
         char* event_name = nvmon_getEventName(gid, i); 
         
-        __state->report.add_counter("{region}", "likwid_gpu", event_name, events[i], 0, {sdfg_id}, {state_id}, {node_id});
+        __state->report.add_counter("{region}", "likwid_gpu", event_name, events[i], 0, {cfg_id}, {state_id}, {node_id});
     }}
 
     free(events);
@@ -402,11 +402,11 @@ def on_state_begin(self, sdfg, state, local_stream, global_stream):
             return
 
         if state.instrument == dace.InstrumentationType.LIKWID_GPU:
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             state_id = sdfg.node_id(state)
             node_id = -1
-            region = f"state_{sdfg_id}_{state_id}_{node_id}"
-            self._regions.append((region, sdfg_id, state_id, node_id))
+            region = f"state_{cfg_id}_{state_id}_{node_id}"
+            self._regions.append((region, cfg_id, state_id, node_id))
 
             marker_code = f'''
 LIKWID_NVMARKER_REGISTER("{region}");
@@ -424,10 +424,10 @@ def on_state_end(self, sdfg, state, local_stream, global_stream):
             return
 
         if state.instrument == dace.InstrumentationType.LIKWID_GPU:
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             state_id = sdfg.node_id(state)
             node_id = -1
-            region = f"state_{sdfg_id}_{state_id}_{node_id}"
+            region = f"state_{cfg_id}_{state_id}_{node_id}"
 
             marker_code = f'''
 LIKWID_NVMARKER_STOP("{region}");
@@ -443,12 +443,12 @@ def on_scope_entry(self, sdfg, state, node, outer_stream, inner_stream, global_s
         elif node.schedule not in LIKWIDInstrumentationGPU.perf_whitelist_schedules:
             raise TypeError("Unsupported schedule on scope")
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         state_id = sdfg.node_id(state)
         node_id = state.node_id(node)
-        region = f"scope_{sdfg_id}_{state_id}_{node_id}"
+        region = f"scope_{cfg_id}_{state_id}_{node_id}"
 
-        self._regions.append((region, sdfg_id, state_id, node_id))
+        self._regions.append((region, cfg_id, state_id, node_id))
         marker_code = f'''
 LIKWID_NVMARKER_REGISTER("{region}");
 
@@ -465,10 +465,10 @@ def on_scope_exit(self, sdfg, state, node, outer_stream, inner_stream, global_st
         if not self._likwid_used or entry_node.instrument != dace.InstrumentationType.LIKWID_GPU:
             return
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         state_id = sdfg.node_id(state)
         node_id = state.node_id(entry_node)
-        region = f"scope_{sdfg_id}_{state_id}_{node_id}"
+        region = f"scope_{cfg_id}_{state_id}_{node_id}"
 
         marker_code = f'''
 LIKWID_NVMARKER_STOP("{region}");
diff --git a/dace/codegen/instrumentation/provider.py b/dace/codegen/instrumentation/provider.py
index 455395c54a..d05e8b001d 100644
--- a/dace/codegen/instrumentation/provider.py
+++ b/dace/codegen/instrumentation/provider.py
@@ -27,7 +27,7 @@ class types, given the currently-registered extensions of this class.
 
     def _idstr(self, sdfg, state, node):
         """ Returns a unique identifier string from a node or state. """
-        result = str(sdfg.sdfg_id)
+        result = str(sdfg.cfg_id)
         if state is not None:
             result += '_' + str(sdfg.node_id(state))
             if node is not None:
diff --git a/dace/codegen/instrumentation/report.py b/dace/codegen/instrumentation/report.py
index cb0b545784..48c2905bf1 100644
--- a/dace/codegen/instrumentation/report.py
+++ b/dace/codegen/instrumentation/report.py
@@ -16,7 +16,7 @@
 def _uuid_to_dict(uuid: UUIDType) -> Dict[str, int]:
     result = {}
     if uuid[0] != -1:
-        result['sdfg_id'] = uuid[0]
+        result['cfg_id'] = uuid[0]
     if uuid[1] != -1:
         result['state_id'] = uuid[1]
     if uuid[2] != -1:
@@ -83,13 +83,13 @@ def get_event_uuid_and_other_info(event) -> Tuple[UUIDType, Dict[str, Any]]:
         other_info = {}
         if 'args' in event:
             args = event['args']
-            if 'sdfg_id' in args and args['sdfg_id'] is not None:
-                uuid = (args['sdfg_id'], -1, -1)
+            if 'cfg_id' in args and args['cfg_id'] is not None:
+                uuid = (args['cfg_id'], -1, -1)
                 if 'state_id' in args and args['state_id'] is not None:
                     uuid = (uuid[0], args['state_id'], -1)
                     if 'id' in args and args['id'] is not None:
                         uuid = (uuid[0], uuid[1], args['id'])
-            other_info = {k: v for k, v in args.items() if k not in ('sdfg_id', 'state_id', 'id')}
+            other_info = {k: v for k, v in args.items() if k not in ('cfg_id', 'state_id', 'id')}
         return uuid, other_info
 
     def __init__(self, filename: str):
diff --git a/dace/codegen/instrumentation/timer.py b/dace/codegen/instrumentation/timer.py
index 5de5025359..a13e50faca 100644
--- a/dace/codegen/instrumentation/timer.py
+++ b/dace/codegen/instrumentation/timer.py
@@ -40,8 +40,8 @@ def on_tend(self, timer_name: str, stream: CodeIOStream, sdfg=None, state=None,
         stream.write('''auto __dace_tend_{id} = std::chrono::high_resolution_clock::now();
 unsigned long int __dace_ts_start_{id} = std::chrono::duration_cast<std::chrono::microseconds>(__dace_tbegin_{id}.time_since_epoch()).count();
 unsigned long int __dace_ts_end_{id} = std::chrono::duration_cast<std::chrono::microseconds>(__dace_tend_{id}.time_since_epoch()).count();
-__state->report.add_completion("{timer_name}", "Timer", __dace_ts_start_{id}, __dace_ts_end_{id}, {sdfg_id}, {state_id}, {node_id});'''
-                     .format(timer_name=timer_name, id=idstr, sdfg_id=sdfg.sdfg_id, state_id=state_id, node_id=node_id))
+__state->report.add_completion("{timer_name}", "Timer", __dace_ts_start_{id}, __dace_ts_end_{id}, {cfg_id}, {state_id}, {node_id});'''
+                     .format(timer_name=timer_name, id=idstr, cfg_id=sdfg.cfg_id, state_id=state_id, node_id=node_id))
 
     # Code generation hooks
     def on_state_begin(self, sdfg, state, local_stream, global_stream):
diff --git a/dace/codegen/prettycode.py b/dace/codegen/prettycode.py
index ebfe426080..72096ca819 100644
--- a/dace/codegen/prettycode.py
+++ b/dace/codegen/prettycode.py
@@ -30,7 +30,7 @@ def write(self, contents, sdfg=None, state_id=None, node_id=None):
 
         # If SDFG/state/node location is given, annotate this line
         if sdfg is not None:
-            location_identifier = '  ////__DACE:%d' % sdfg.sdfg_id
+            location_identifier = '  ////__DACE:%d' % sdfg.cfg_id
             if state_id is not None:
                 location_identifier += ':' + str(state_id)
                 if node_id is not None:
diff --git a/dace/codegen/targets/cpp.py b/dace/codegen/targets/cpp.py
index f3f1424297..106491cf9f 100644
--- a/dace/codegen/targets/cpp.py
+++ b/dace/codegen/targets/cpp.py
@@ -246,15 +246,15 @@ def ptr(name: str, desc: data.Data, sdfg: SDFG = None, framecode=None) -> str:
         from dace.codegen.targets.cuda import CUDACodeGen  # Avoid import loop
 
         if desc.storage == dtypes.StorageType.CPU_ThreadLocal:  # Use unambiguous name for thread-local arrays
-            return f'__{sdfg.sdfg_id}_{name}'
+            return f'__{sdfg.cfg_id}_{name}'
         elif not CUDACodeGen._in_device_code:  # GPU kernels cannot access state
-            return f'__state->__{sdfg.sdfg_id}_{name}'
+            return f'__state->__{sdfg.cfg_id}_{name}'
         elif (sdfg, name) in framecode.where_allocated and framecode.where_allocated[(sdfg, name)] is not sdfg:
-            return f'__{sdfg.sdfg_id}_{name}'
+            return f'__{sdfg.cfg_id}_{name}'
     elif (desc.transient and sdfg is not None and framecode is not None and (sdfg, name) in framecode.where_allocated
           and framecode.where_allocated[(sdfg, name)] is not sdfg):
         # Array allocated for another SDFG, use unambiguous name
-        return f'__{sdfg.sdfg_id}_{name}'
+        return f'__{sdfg.cfg_id}_{name}'
 
     return name
 
@@ -897,7 +897,7 @@ def unparse_tasklet(sdfg, state_id, dfg, node, function_stream, callsite_stream,
             # Doesn't cause crashes due to missing pyMLIR if a MLIR tasklet is not present
             from dace.codegen.targets.mlir import utils
 
-            mlir_func_uid = "_" + str(sdfg.sdfg_id) + "_" + str(state_id) + "_" + str(dfg.node_id(node))
+            mlir_func_uid = "_" + str(sdfg.cfg_id) + "_" + str(state_id) + "_" + str(dfg.node_id(node))
 
             mlir_ast = utils.get_ast(node.code.code)
             mlir_is_generic = utils.is_generic(mlir_ast)
diff --git a/dace/codegen/targets/cpu.py b/dace/codegen/targets/cpu.py
index 84d55c9910..a7369182dd 100644
--- a/dace/codegen/targets/cpu.py
+++ b/dace/codegen/targets/cpu.py
@@ -919,7 +919,7 @@ def process_out_memlets(self,
                 shared_data_name = edge.data.data
                 if not shared_data_name:
                     # Very unique name. TODO: Make more intuitive
-                    shared_data_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.sdfg_id, state_id, dfg.node_id(node),
+                    shared_data_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.cfg_id, state_id, dfg.node_id(node),
                                                                   dfg.node_id(dst_node), edge.src_conn)
 
                 result.write(
@@ -1329,7 +1329,7 @@ def _generate_Tasklet(self, sdfg, dfg, state_id, node, function_stream, callsite
                     shared_data_name = edge.data.data
                     if not shared_data_name:
                         # Very unique name. TODO: Make more intuitive
-                        shared_data_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.sdfg_id, state_id, dfg.node_id(src_node),
+                        shared_data_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.cfg_id, state_id, dfg.node_id(src_node),
                                                                       dfg.node_id(node), edge.src_conn)
 
                     # Read variable from shared storage
@@ -1398,7 +1398,7 @@ def _generate_Tasklet(self, sdfg, dfg, state_id, node, function_stream, callsite
                 local_name = edge.data.data
                 if not local_name:
                     # Very unique name. TODO: Make more intuitive
-                    local_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.sdfg_id, state_id, dfg.node_id(node),
+                    local_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.cfg_id, state_id, dfg.node_id(node),
                                                             dfg.node_id(dst_node), edge.src_conn)
 
                 # Allocate variable type
@@ -1624,7 +1624,7 @@ def _generate_NestedSDFG(
             # If the SDFG has a unique name, use it
             sdfg_label = node.unique_name
         else:
-            sdfg_label = "%s_%d_%d_%d" % (node.sdfg.name, sdfg.sdfg_id, state_id, dfg.node_id(node))
+            sdfg_label = "%s_%d_%d_%d" % (node.sdfg.name, sdfg.cfg_id, state_id, dfg.node_id(node))
 
         code_already_generated = False
         if unique_functions and not inline:
@@ -2015,7 +2015,7 @@ def _generate_ConsumeEntry(
                     ctype = node.out_connectors[edge.src_conn].ctype
                     if not local_name:
                         # Very unique name. TODO: Make more intuitive
-                        local_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.sdfg_id, state_id, dfg.node_id(
+                        local_name = '__dace_%d_%d_%d_%d_%s' % (sdfg.cfg_id, state_id, dfg.node_id(
                             edge.src), dfg.node_id(edge.dst), edge.src_conn)
 
                     # Allocate variable type
diff --git a/dace/codegen/targets/cuda.py b/dace/codegen/targets/cuda.py
index 4e008e13ac..b370101091 100644
--- a/dace/codegen/targets/cuda.py
+++ b/dace/codegen/targets/cuda.py
@@ -229,8 +229,8 @@ def _compute_pool_release(self, top_sdfg: SDFG):
                 reachability = ap.StateReachability().apply_pass(top_sdfg, {})
                 access_nodes = ap.FindAccessStates().apply_pass(top_sdfg, {})
 
-            reachable = reachability[sdfg.sdfg_id]
-            access_sets = access_nodes[sdfg.sdfg_id]
+            reachable = reachability[sdfg.cfg_id]
+            access_sets = access_nodes[sdfg.cfg_id]
             for state in sdfg.nodes():
                 # Find all data descriptors that will no longer be used after this state
                 last_state_arrays: Set[str] = set(
@@ -649,7 +649,7 @@ def allocate_stream(self, sdfg, dfg, state_id, node, nodedesc, function_stream,
                 'allocname': allocname,
                 'type': nodedesc.dtype.ctype,
                 'is_pow2': sym2cpp(sympy.log(nodedesc.buffer_size, 2).is_Integer),
-                'location': '%s_%s_%s' % (sdfg.sdfg_id, state_id, dfg.node_id(node))
+                'location': '%s_%s_%s' % (sdfg.cfg_id, state_id, dfg.node_id(node))
             }
 
             ctypedef = 'dace::GPUStream<{type}, {is_pow2}>'.format(**fmtargs)
@@ -1407,7 +1407,7 @@ def generate_scope(self, sdfg, dfg_scope, state_id, function_stream, callsite_st
                     create_grid_barrier = True
 
         self.create_grid_barrier = create_grid_barrier
-        kernel_name = '%s_%d_%d_%d' % (scope_entry.map.label, sdfg.sdfg_id, sdfg.node_id(state),
+        kernel_name = '%s_%d_%d_%d' % (scope_entry.map.label, sdfg.cfg_id, sdfg.node_id(state),
                                        state.node_id(scope_entry))
 
         # Comprehend grid/block dimensions from scopes
diff --git a/dace/codegen/targets/fpga.py b/dace/codegen/targets/fpga.py
index 8df8fe94fa..db47324268 100644
--- a/dace/codegen/targets/fpga.py
+++ b/dace/codegen/targets/fpga.py
@@ -616,9 +616,9 @@ def generate_state(self, sdfg: dace.SDFG, state: dace.SDFGState, function_stream
                 # Create a unique kernel name to avoid name clashes
                 # If this kernels comes from a Nested SDFG, use that name also
                 if sdfg.parent_nsdfg_node is not None:
-                    kernel_name = f"{sdfg.parent_nsdfg_node.label}_{state.label}_{kern_id}_{sdfg.sdfg_id}"
+                    kernel_name = f"{sdfg.parent_nsdfg_node.label}_{state.label}_{kern_id}_{sdfg.cfg_id}"
                 else:
-                    kernel_name = f"{state.label}_{kern_id}_{sdfg.sdfg_id}"
+                    kernel_name = f"{state.label}_{kern_id}_{sdfg.cfg_id}"
 
                 # Vitis HLS removes double underscores, which leads to a compilation
                 # error down the road due to kernel name mismatch. Remove them here
@@ -676,7 +676,7 @@ def generate_state(self, sdfg: dace.SDFG, state: dace.SDFGState, function_stream
             ## Generate the global function here
 
             kernel_host_stream = CodeIOStream()
-            host_function_name = f"__dace_runstate_{sdfg.sdfg_id}_{state.name}_{state_id}"
+            host_function_name = f"__dace_runstate_{sdfg.cfg_id}_{state.name}_{state_id}"
             function_stream.write("\n\nDACE_EXPORTED void {}({});\n\n".format(host_function_name,
                                                                               ", ".join(kernel_args_opencl)))
 
@@ -717,8 +717,8 @@ def generate_state(self, sdfg: dace.SDFG, state: dace.SDFGState, function_stream
                 kernel_host_stream.write(f"""\
 const unsigned long int _dace_fpga_end_us = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count();
 // Convert from nanoseconds (reported by OpenCL) to microseconds (expected by the profiler)
-__state->report.add_completion("Full FPGA kernel runtime for {state.label}", "FPGA", 1e-3 * first_start, 1e-3 * last_end, {sdfg.sdfg_id}, {state_id}, -1);
-__state->report.add_completion("Full FPGA state runtime for {state.label}", "FPGA", _dace_fpga_begin_us, _dace_fpga_end_us, {sdfg.sdfg_id}, {state_id}, -1);
+__state->report.add_completion("Full FPGA kernel runtime for {state.label}", "FPGA", 1e-3 * first_start, 1e-3 * last_end, {sdfg.cfg_id}, {state_id}, -1);
+__state->report.add_completion("Full FPGA state runtime for {state.label}", "FPGA", _dace_fpga_begin_us, _dace_fpga_end_us, {sdfg.cfg_id}, {state_id}, -1);
 """)
                 if Config.get_bool("instrumentation", "print_fpga_runtime"):
                     kernel_host_stream.write(f"""
@@ -2387,7 +2387,7 @@ def make_ptr_vector_cast(self, *args, **kwargs):
     def make_ptr_assignment(self, *args, **kwargs):
         return self._cpu_codegen.make_ptr_assignment(*args, codegen=self, **kwargs)
 
-    def instrument_opencl_kernel(self, kernel_name: str, state_id: int, sdfg_id: int, code_stream: CodeIOStream):
+    def instrument_opencl_kernel(self, kernel_name: str, state_id: int, cfg_id: int, code_stream: CodeIOStream):
         """
         Emits code to instrument the OpenCL kernel with the given `kernel_name`.
         """
@@ -2414,5 +2414,5 @@ def instrument_opencl_kernel(self, kernel_name: str, state_id: int, sdfg_id: int
     last_end = event_end;
 }}
 // Convert from nanoseconds (reported by OpenCL) to microseconds (expected by the profiler)
-__state->report.add_completion("{kernel_name}", "FPGA", 1e-3 * event_start, 1e-3 * event_end, {sdfg_id}, {state_id}, -1);{print_str}
+__state->report.add_completion("{kernel_name}", "FPGA", 1e-3 * event_start, 1e-3 * event_end, {cfg_id}, {state_id}, -1);{print_str}
 }}""")
diff --git a/dace/codegen/targets/framecode.py b/dace/codegen/targets/framecode.py
index 7b6df55132..80bb39eed5 100644
--- a/dace/codegen/targets/framecode.py
+++ b/dace/codegen/targets/framecode.py
@@ -52,7 +52,7 @@ def __init__(self, sdfg: SDFG):
 
         # resolve all symbols and constants
         # first handle root
-        self._symbols_and_constants[sdfg.sdfg_id] = sdfg.free_symbols.union(sdfg.constants_prop.keys())
+        self._symbols_and_constants[sdfg.cfg_id] = sdfg.free_symbols.union(sdfg.constants_prop.keys())
         # then recurse
         for nested, state in sdfg.all_nodes_recursive():
             if isinstance(nested, nodes.NestedSDFG):
@@ -63,7 +63,7 @@ def __init__(self, sdfg: SDFG):
                 # found a new nested sdfg: resolve symbols and constants
                 result = nsdfg.free_symbols.union(nsdfg.constants_prop.keys())
 
-                parent_constants = self._symbols_and_constants[nsdfg._parent_sdfg.sdfg_id]
+                parent_constants = self._symbols_and_constants[nsdfg._parent_sdfg.cfg_id]
                 result |= parent_constants
 
                 # check for constant inputs
@@ -72,11 +72,11 @@ def __init__(self, sdfg: SDFG):
                         # this edge is constant => propagate to nested sdfg
                         result.add(edge.dst_conn)
 
-                self._symbols_and_constants[nsdfg.sdfg_id] = result
+                self._symbols_and_constants[nsdfg.cfg_id] = result
 
     # Cached fields
     def symbols_and_constants(self, sdfg: SDFG):
-        return self._symbols_and_constants[sdfg.sdfg_id]
+        return self._symbols_and_constants[sdfg.cfg_id]
 
     def free_symbols(self, obj: Any):
         k = id(obj)
@@ -390,7 +390,7 @@ def generate_external_memory_management(self, sdfg: SDFG, callsite_stream: CodeI
             
             offset = 0
             for subsdfg, aname, arr in arrays:
-                allocname = f'__state->__{subsdfg.sdfg_id}_{aname}'
+                allocname = f'__state->__{subsdfg.cfg_id}_{aname}'
                 callsite_stream.write(f'{allocname} = decltype({allocname})(ptr + {sym2cpp(offset)});', subsdfg)
                 offset += arr.total_size * arr.dtype.bytes
             
@@ -449,7 +449,7 @@ def generate_state(self, sdfg, state, global_stream, callsite_stream, generate_s
     def generate_states(self, sdfg, global_stream, callsite_stream):
         states_generated = set()
 
-        opbar = progress.OptionalProgressBar(sdfg.number_of_nodes(), title=f'Generating code (SDFG {sdfg.sdfg_id})')
+        opbar = progress.OptionalProgressBar(sdfg.number_of_nodes(), title=f'Generating code (SDFG {sdfg.cfg_id})')
 
         # Create closure + function for state dispatcher
         def dispatch_state(state: SDFGState) -> str:
@@ -482,7 +482,7 @@ def dispatch_state(state: SDFGState) -> str:
         opbar.done()
 
         # Write exit label
-        callsite_stream.write(f'__state_exit_{sdfg.sdfg_id}:;', sdfg)
+        callsite_stream.write(f'__state_exit_{sdfg.cfg_id}:;', sdfg)
 
         return states_generated
 
@@ -539,8 +539,8 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
         reachability = StateReachability().apply_pass(top_sdfg, {})
         access_instances: Dict[int, Dict[str, List[Tuple[SDFGState, nodes.AccessNode]]]] = {}
         for sdfg in top_sdfg.all_sdfgs_recursive():
-            shared_transients[sdfg.sdfg_id] = sdfg.shared_transients(check_toplevel=False)
-            fsyms[sdfg.sdfg_id] = self.symbols_and_constants(sdfg)
+            shared_transients[sdfg.cfg_id] = sdfg.shared_transients(check_toplevel=False)
+            fsyms[sdfg.cfg_id] = self.symbols_and_constants(sdfg)
 
             #############################################
             # Look for all states in which a scope-allocated array is used in
@@ -562,7 +562,7 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
                     instances[edge_array].append((state, nodes.AccessNode(edge_array)))
             #############################################
 
-            access_instances[sdfg.sdfg_id] = instances
+            access_instances[sdfg.cfg_id] = instances
 
         for sdfg, name, desc in top_sdfg.arrays_recursive():
             if not desc.transient:
@@ -584,9 +584,9 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
             # 6. True if deallocation should take place, otherwise False.
 
             first_state_instance, first_node_instance = \
-                access_instances[sdfg.sdfg_id].get(name, [(None, None)])[0]
+                access_instances[sdfg.cfg_id].get(name, [(None, None)])[0]
             last_state_instance, last_node_instance = \
-                access_instances[sdfg.sdfg_id].get(name, [(None, None)])[-1]
+                access_instances[sdfg.cfg_id].get(name, [(None, None)])[-1]
 
             # Cases
             if desc.lifetime in (dtypes.AllocationLifetime.Persistent, dtypes.AllocationLifetime.External):
@@ -597,7 +597,7 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
                 if first_node_instance is None:
                     continue
 
-                definition = desc.as_arg(name=f'__{sdfg.sdfg_id}_{name}') + ';'
+                definition = desc.as_arg(name=f'__{sdfg.cfg_id}_{name}') + ';'
 
                 if desc.storage != dtypes.StorageType.CPU_ThreadLocal:  # If thread-local, skip struct entry
                     self.statestruct.append(definition)
@@ -614,7 +614,7 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
                 if first_node_instance is None:
                     continue
 
-                definition = desc.as_arg(name=f'__{sdfg.sdfg_id}_{name}') + ';'
+                definition = desc.as_arg(name=f'__{sdfg.cfg_id}_{name}') + ';'
                 self.statestruct.append(definition)
 
                 self.to_allocate[top_sdfg].append((sdfg, first_state_instance, first_node_instance, True, True, True))
@@ -627,7 +627,7 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
             # a kernel).
             alloc_scope: Union[nodes.EntryNode, SDFGState, SDFG] = None
             alloc_state: SDFGState = None
-            if (name in shared_transients[sdfg.sdfg_id] or desc.lifetime is dtypes.AllocationLifetime.SDFG):
+            if (name in shared_transients[sdfg.cfg_id] or desc.lifetime is dtypes.AllocationLifetime.SDFG):
                 # SDFG descriptors are allocated in the beginning of their SDFG
                 alloc_scope = sdfg
                 if first_state_instance is not None:
@@ -741,14 +741,14 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
 
             # Check if Array/View is dependent on non-free SDFG symbols
             # NOTE: Tuple is (SDFG, State, Node, declare, allocate, deallocate)
-            fsymbols = fsyms[sdfg.sdfg_id]
+            fsymbols = fsyms[sdfg.cfg_id]
             if (not isinstance(curscope, nodes.EntryNode)
                     and utils.is_nonfree_sym_dependent(first_node_instance, desc, first_state_instance, fsymbols)):
                 # Allocate in first State, deallocate in last State
                 if first_state_instance != last_state_instance:
                     # If any state is not reachable from first state, find common denominators in the form of
                     # dominator and postdominator.
-                    instances = access_instances[sdfg.sdfg_id][name]
+                    instances = access_instances[sdfg.cfg_id][name]
 
                     # A view gets "allocated" everywhere it appears
                     if isinstance(desc, (data.StructureView, data.View)):
@@ -758,7 +758,7 @@ def determine_allocation_lifetime(self, top_sdfg: SDFG):
                         self.where_allocated[(sdfg, name)] = cursdfg
                         continue
 
-                    if any(inst not in reachability[sdfg.sdfg_id][first_state_instance] for inst in instances):
+                    if any(inst not in reachability[sdfg.cfg_id][first_state_instance] for inst in instances):
                         first_state_instance, last_state_instance = _get_dominator_and_postdominator(sdfg, instances)
                         # Declare in SDFG scope
                         # NOTE: Even if we declare the data at a common dominator, we keep the first and last node
@@ -818,20 +818,20 @@ def deallocate_arrays_in_scope(self, sdfg: SDFG, scope: Union[nodes.EntryNode, S
     def generate_code(self,
                       sdfg: SDFG,
                       schedule: Optional[dtypes.ScheduleType],
-                      sdfg_id: str = "") -> Tuple[str, str, Set[TargetCodeGenerator], Set[str]]:
+                      cfg_id: str = "") -> Tuple[str, str, Set[TargetCodeGenerator], Set[str]]:
         """ Generate frame code for a given SDFG, calling registered targets'
             code generation callbacks for them to generate their own code.
 
             :param sdfg: The SDFG to generate code for.
             :param schedule: The schedule the SDFG is currently located, or
                              None if the SDFG is top-level.
-            :param sdfg_id: An optional string id given to the SDFG label
+            :param cfg_id: An optional string id given to the SDFG label
             :return: A tuple of the generated global frame code, local frame
                      code, and a set of targets that have been used in the
                      generation of this SDFG.
         """
-        if len(sdfg_id) == 0 and sdfg.sdfg_id != 0:
-            sdfg_id = '_%d' % sdfg.sdfg_id
+        if len(cfg_id) == 0 and sdfg.cfg_id != 0:
+            cfg_id = '_%d' % sdfg.cfg_id
 
         global_stream = CodeIOStream()
         callsite_stream = CodeIOStream()
diff --git a/dace/codegen/targets/intel_fpga.py b/dace/codegen/targets/intel_fpga.py
index 03a04fda41..f44d84c76c 100644
--- a/dace/codegen/targets/intel_fpga.py
+++ b/dace/codegen/targets/intel_fpga.py
@@ -580,7 +580,7 @@ def generate_module(self, sdfg, state, kernel_name, module_name, subgraph, param
         is_autorun = len(kernel_args_opencl) == 0
 
         # create a unique module name to prevent name clashes
-        module_function_name = "mod_" + str(sdfg.sdfg_id) + "_" + module_name
+        module_function_name = "mod_" + str(sdfg.cfg_id) + "_" + module_name
         # The official limit suggested by Intel for module name is 61. However, the compiler
         # can also append text to the module. Longest seen so far is
         # "_cra_slave_inst", which is 15 characters, so we restrict to
@@ -616,7 +616,7 @@ def generate_module(self, sdfg, state, kernel_name, module_name, subgraph, param
                         kernel_name, module_function_name,
                         ", ".join([""] + kernel_args_call) if len(kernel_args_call) > 0 else ""), sdfg, state_id)
                 if state.instrument == dtypes.InstrumentationType.FPGA:
-                    self.instrument_opencl_kernel(module_function_name, state_id, sdfg.sdfg_id, instrumentation_stream)
+                    self.instrument_opencl_kernel(module_function_name, state_id, sdfg.cfg_id, instrumentation_stream)
             else:
                 # We will generate a separate kernel for each PE. Adds host call
                 start, stop, skip = unrolled_loop.range.ranges[0]
@@ -639,7 +639,7 @@ def generate_module(self, sdfg, state, kernel_name, module_name, subgraph, param
                             ", ".join([""] + kernel_args_call[:-1]) if len(kernel_args_call) > 1 else ""), sdfg,
                         state_id)
                     if state.instrument == dtypes.InstrumentationType.FPGA:
-                        self.instrument_opencl_kernel(unrolled_module_name, state_id, sdfg.sdfg_id,
+                        self.instrument_opencl_kernel(unrolled_module_name, state_id, sdfg.cfg_id,
                                                       instrumentation_stream)
 
         # ----------------------------------------------------------------------
@@ -663,7 +663,7 @@ def generate_module(self, sdfg, state, kernel_name, module_name, subgraph, param
             # a function that will be used create a kernel multiple times
 
             # generate a unique name for this function
-            pe_function_name = "pe_" + str(sdfg.sdfg_id) + "_" + module_name + "_func"
+            pe_function_name = "pe_" + str(sdfg.cfg_id) + "_" + module_name + "_func"
             module_body_stream.write("inline void {}({}) {{".format(pe_function_name, ", ".join(kernel_args_opencl)),
                                      sdfg, state_id)
 
diff --git a/dace/codegen/targets/mlir/mlir.py b/dace/codegen/targets/mlir/mlir.py
index 6b1c5d4e5f..09cc69c72e 100644
--- a/dace/codegen/targets/mlir/mlir.py
+++ b/dace/codegen/targets/mlir/mlir.py
@@ -24,7 +24,7 @@ def node_dispatch_predicate(self, sdfg, state, node):
 
     def generate_node(self, sdfg, dfg, state_id, node, function_stream, callsite_stream):
         if self.node_dispatch_predicate(sdfg, dfg, node):
-            function_uid = str(sdfg.sdfg_id) + "_" + str(state_id) + "_" + str(dfg.node_id(node))
+            function_uid = str(sdfg.cfg_id) + "_" + str(state_id) + "_" + str(dfg.node_id(node))
             node.code.code = node.code.code.replace("mlir_entry", "mlir_entry_" + function_uid)
             node.label = node.name + "_" + function_uid
             self._codeobjects.append(CodeObject(node.name, node.code.code, "mlir", MLIRCodeGen, node.name + "_Source"))
diff --git a/dace/codegen/targets/rtl.py b/dace/codegen/targets/rtl.py
index 935615fad6..c9d13f0395 100644
--- a/dace/codegen/targets/rtl.py
+++ b/dace/codegen/targets/rtl.py
@@ -495,7 +495,7 @@ def generate_running_condition(self, tasklet):
         return evals
 
     def unique_name(self, node: nodes.RTLTasklet, state, sdfg):
-        return "{}_{}_{}_{}".format(node.name, sdfg.sdfg_id, sdfg.node_id(state), state.node_id(node))
+        return "{}_{}_{}_{}".format(node.name, sdfg.cfg_id, sdfg.node_id(state), state.node_id(node))
 
     def unparse_tasklet(self, sdfg: sdfg.SDFG, dfg: state.StateSubgraphView, state_id: int, node: nodes.Node,
                         function_stream: prettycode.CodeIOStream, callsite_stream: prettycode.CodeIOStream):
diff --git a/dace/codegen/targets/snitch.py b/dace/codegen/targets/snitch.py
index 1eb6f68a2a..a5978a5582 100644
--- a/dace/codegen/targets/snitch.py
+++ b/dace/codegen/targets/snitch.py
@@ -1041,9 +1041,9 @@ def write_and_resolve_expr(self, sdfg, memlet, nc, outname, inname, indices=None
                 raise NotImplementedError("Unimplemented reduction type " + str(redtype))
                 # fmt_str='inline {t} reduction_{sdfgid}_{stateid}_{nodeid}({t} {arga}, {t} {argb}) {{ {unparse_wcr_result} }}'
                 # fmt_str.format(t=dtype.ctype,
-                #   sdfgid=sdfg.sdfg_id, stateid=42, nodeid=43, unparse_wcr_result=cpp.unparse_cr_split(sdfg,memlet.wcr)[0],
+                #   sdfgid=sdfg.cfg_id, stateid=42, nodeid=43, unparse_wcr_result=cpp.unparse_cr_split(sdfg,memlet.wcr)[0],
                 #   arga=cpp.unparse_cr_split(sdfg,memlet.wcr)[1][0],argb=cpp.unparse_cr_split(sdfg,memlet.wcr)[1][1])
-                # sdfgid=sdfg.sdfg_id
+                # sdfgid=sdfg.cfg_id
                 # stateid=42
                 # nodeid=43
                 # return (f'reduction_{sdfgid}_{stateid}_{nodeid}(*({ptr}), {inname})')
diff --git a/dace/codegen/targets/xilinx.py b/dace/codegen/targets/xilinx.py
index 0c562c59c5..2c2802b615 100644
--- a/dace/codegen/targets/xilinx.py
+++ b/dace/codegen/targets/xilinx.py
@@ -692,7 +692,7 @@ def generate_host_function_body(self, sdfg: dace.SDFG, state: dace.SDFGState, ke
   hlslib::ocl::Event {kernel_name}_event = {kernel_name}_kernel.ExecuteTaskAsync({f'{kernel_deps_name}.begin(), {kernel_deps_name}.end()' if needs_synch else ''});
   all_events.push_back({kernel_name}_event);""", sdfg, sdfg.node_id(state))
         if state.instrument == dtypes.InstrumentationType.FPGA:
-            self.instrument_opencl_kernel(kernel_name, sdfg.node_id(state), sdfg.sdfg_id, instrumentation_stream)
+            self.instrument_opencl_kernel(kernel_name, sdfg.node_id(state), sdfg.cfg_id, instrumentation_stream)
 
     def generate_module(self, sdfg, state, kernel_name, name, subgraph, parameters, module_stream, entry_stream,
                         host_stream, instrumentation_stream):
@@ -837,12 +837,12 @@ def generate_module(self, sdfg, state, kernel_name, name, subgraph, parameters,
                 f"all_events.push_back(program.MakeKernel(\"{rtl_name}_top\"{', '.join([''] + [name for _, name, p, _ in parameters if not isinstance(p, dt.Stream)])}).ExecuteTaskAsync());",
                 sdfg, state_id, rtl_tasklet)
             if state.instrument == dtypes.InstrumentationType.FPGA:
-                self.instrument_opencl_kernel(rtl_name, state_id, sdfg.sdfg_id, instrumentation_stream)
+                self.instrument_opencl_kernel(rtl_name, state_id, sdfg.cfg_id, instrumentation_stream)
 
             return
 
         # create a unique module name to prevent name clashes
-        module_function_name = f"module_{name}_{sdfg.sdfg_id}"
+        module_function_name = f"module_{name}_{sdfg.cfg_id}"
 
         # Unrolling processing elements: if there first scope of the subgraph
         # is an unrolled map, generate a processing element for each iteration
@@ -950,7 +950,7 @@ def generate_module(self, sdfg, state, kernel_name, name, subgraph, parameters,
         self._dispatcher.defined_vars.exit_scope(subgraph)
 
     def rtl_tasklet_name(self, node: nodes.RTLTasklet, state, sdfg):
-        return "{}_{}_{}_{}".format(node.name, sdfg.sdfg_id, sdfg.node_id(state), state.node_id(node))
+        return "{}_{}_{}_{}".format(node.name, sdfg.cfg_id, sdfg.node_id(state), state.node_id(node))
 
     def generate_kernel_internal(self, sdfg: dace.SDFG, state: dace.SDFGState, kernel_name: str, predecessors: list,
                                  subgraphs: list, kernel_stream: CodeIOStream, state_host_header_stream: CodeIOStream,
diff --git a/dace/libraries/standard/nodes/reduce.py b/dace/libraries/standard/nodes/reduce.py
index 4e04a656fe..fa231c07f2 100644
--- a/dace/libraries/standard/nodes/reduce.py
+++ b/dace/libraries/standard/nodes/reduce.py
@@ -817,7 +817,7 @@ def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
         }
 
         local_storage = InLocalStorage()
-        local_storage.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(state), in_local_storage_subgraph, 0)
+        local_storage.setup_match(sdfg, sdfg.cfg_id, sdfg.nodes().index(state), in_local_storage_subgraph, 0)
 
         local_storage.array = in_edge.data.data
         local_storage.apply(graph, sdfg)
@@ -825,7 +825,7 @@ def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
         sdfg.data(in_transient.data).storage = dtypes.StorageType.Register
 
         local_storage = OutLocalStorage()
-        local_storage.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(state), out_local_storage_subgraph, 0)
+        local_storage.setup_match(sdfg, sdfg.cfg_id, sdfg.nodes().index(state), out_local_storage_subgraph, 0)
         local_storage.array = out_edge.data.data
         local_storage.apply(graph, sdfg)
         out_transient = local_storage._data_node
@@ -872,7 +872,7 @@ def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
         # itself and expand again.
         reduce_node.implementation = 'CUDA (block)'
         sub_expansion = ExpandReduceCUDABlock()
-        sub_expansion.setup_match(sdfg, sdfg.sdfg_id, sdfg.node_id(state), {}, 0)
+        sub_expansion.setup_match(sdfg, sdfg.cfg_id, sdfg.node_id(state), {}, 0)
         return sub_expansion.expansion(node=node, state=state, sdfg=sdfg)
         #return reduce_node.expand(sdfg, state)
 
diff --git a/dace/optimization/on_the_fly_map_fusion_tuner.py b/dace/optimization/on_the_fly_map_fusion_tuner.py
index f412abf4e6..981a77cc32 100644
--- a/dace/optimization/on_the_fly_map_fusion_tuner.py
+++ b/dace/optimization/on_the_fly_map_fusion_tuner.py
@@ -94,7 +94,7 @@ def evaluate(self, config, cutout, measurements: int, **kwargs) -> float:
         subgraph = helpers.subgraph_from_maps(sdfg=candidate, graph=candidate.start_state, map_entries=maps_)
 
         map_fusion = sg.SubgraphOTFFusion()
-        map_fusion.setup_match(subgraph, candidate.sdfg_id, candidate.node_id(candidate.start_state))
+        map_fusion.setup_match(subgraph, candidate.cfg_id, candidate.node_id(candidate.start_state))
         if map_fusion.can_be_applied(candidate.start_state, candidate):
             fuse_counter = map_fusion.apply(candidate.start_state, candidate)
 
@@ -120,7 +120,7 @@ def apply(self, config: Tuple[int, List[int]], label: str, **kwargs) -> None:
         subgraph = helpers.subgraph_from_maps(sdfg=sdfg, graph=state, map_entries=maps_)
 
         map_fusion = sg.SubgraphOTFFusion()
-        map_fusion.setup_match(subgraph, sdfg.sdfg_id, state_id)
+        map_fusion.setup_match(subgraph, sdfg.cfg_id, state_id)
         if map_fusion.can_be_applied(state, sdfg):
             fuse_counter = map_fusion.apply(state, sdfg)
             print(f"Fusing {fuse_counter} maps")
@@ -255,7 +255,7 @@ def transfer(sdfg: dace.SDFG, tuner, k: int = 5):
                         experiment_subgraph = helpers.subgraph_from_maps(sdfg=experiment_sdfg, graph=experiment_state, map_entries=experiment_maps)
 
                         map_fusion = sg.SubgraphOTFFusion()
-                        map_fusion.setup_match(experiment_subgraph, experiment_sdfg.sdfg_id,
+                        map_fusion.setup_match(experiment_subgraph, experiment_sdfg.cfg_id,
                                                experiment_sdfg.node_id(experiment_state))
                         if map_fusion.can_be_applied(experiment_state, experiment_sdfg):
                             try:
@@ -289,7 +289,7 @@ def transfer(sdfg: dace.SDFG, tuner, k: int = 5):
                     if best_pattern is not None:
                         subgraph = helpers.subgraph_from_maps(sdfg=nsdfg, graph=state, map_entries=best_pattern)
                         map_fusion = sg.SubgraphOTFFusion()
-                        map_fusion.setup_match(subgraph, nsdfg.sdfg_id, nsdfg.node_id(state))
+                        map_fusion.setup_match(subgraph, nsdfg.cfg_id, nsdfg.node_id(state))
                         actual_fuse_counter = map_fusion.apply(state, nsdfg)
 
                         best_pattern = None
diff --git a/dace/optimization/subgraph_fusion_tuner.py b/dace/optimization/subgraph_fusion_tuner.py
index ad84d57f78..a0f09038f3 100644
--- a/dace/optimization/subgraph_fusion_tuner.py
+++ b/dace/optimization/subgraph_fusion_tuner.py
@@ -67,7 +67,7 @@ def apply(self, config: Tuple[int, List[int]], label: str, **kwargs) -> None:
         subgraph = helpers.subgraph_from_maps(sdfg=sdfg, graph=state, map_entries=maps_)
 
         subgraph_fusion = sg.CompositeFusion()
-        subgraph_fusion.setup_match(subgraph, sdfg.sdfg_id, state_id)
+        subgraph_fusion.setup_match(subgraph, sdfg.cfg_id, state_id)
         subgraph_fusion.allow_tiling = True
         subgraph_fusion.schedule_innermaps = dace.ScheduleType.GPU_Device
         if subgraph_fusion.can_be_applied(sdfg, subgraph):
@@ -117,7 +117,7 @@ def evaluate(self, config, cutout, measurements: int, **kwargs) -> float:
         subgraph = helpers.subgraph_from_maps(sdfg=candidate, graph=candidate.start_state, map_entries=maps_)
 
         subgraph_fusion = sg.CompositeFusion()
-        subgraph_fusion.setup_match(subgraph, candidate.sdfg_id, candidate.node_id(candidate.start_state))
+        subgraph_fusion.setup_match(subgraph, candidate.cfg_id, candidate.node_id(candidate.start_state))
         subgraph_fusion.allow_tiling = True
         subgraph_fusion.schedule_innermaps = dace.ScheduleType.GPU_Device
         if subgraph_fusion.can_be_applied(candidate, subgraph):
@@ -260,7 +260,7 @@ def transfer(sdfg: dace.SDFG, tuner, k: int = 5):
                         experiment_subgraph = helpers.subgraph_from_maps(sdfg=experiment_sdfg, graph=experiment_state, map_entries=experiment_maps)
 
                         subgraph_fusion = sg.CompositeFusion()
-                        subgraph_fusion.setup_match(experiment_subgraph, experiment_sdfg.sdfg_id,
+                        subgraph_fusion.setup_match(experiment_subgraph, experiment_sdfg.cfg_id,
                                                     experiment_sdfg.node_id(experiment_state))
                         subgraph_fusion.allow_tiling = True
                         subgraph_fusion.schedule_innermaps = dace.ScheduleType.GPU_Device
@@ -295,7 +295,7 @@ def transfer(sdfg: dace.SDFG, tuner, k: int = 5):
                     if best_pattern is not None:
                         subgraph = helpers.subgraph_from_maps(sdfg=nsdfg, graph=state, map_entries=best_pattern)
                         subgraph_fusion = sg.CompositeFusion()
-                        subgraph_fusion.setup_match(subgraph, nsdfg.sdfg_id, nsdfg.node_id(state))
+                        subgraph_fusion.setup_match(subgraph, nsdfg.cfg_id, nsdfg.node_id(state))
                         subgraph_fusion.allow_tiling = True
                         subgraph_fusion.schedule_innermaps = dace.ScheduleType.GPU_Device
                         subgraph_fusion.apply(nsdfg)
diff --git a/dace/runtime/include/dace/perf/reporting.h b/dace/runtime/include/dace/perf/reporting.h
index 83cddc0ba2..9b9a59ab09 100644
--- a/dace/runtime/include/dace/perf/reporting.h
+++ b/dace/runtime/include/dace/perf/reporting.h
@@ -34,7 +34,7 @@ namespace perf {
         unsigned long int tend;
         size_t tid;
         struct _element_id {
-            int sdfg_id;
+            int cfg_id;
             int state_id;
             int el_id;
         } element_id;
@@ -80,7 +80,7 @@ namespace perf {
             const char *counter_name,
             unsigned long int counter_val,
             size_t tid,
-            int sdfg_id,
+            int cfg_id,
             int state_id,
             int el_id
         ) {
@@ -95,7 +95,7 @@ namespace perf {
                 tstart,
                 0,
                 tid,
-                { sdfg_id, state_id, el_id },
+                { cfg_id, state_id, el_id },
                 { "", counter_val }
             };
             strncpy(event.name, name, DACE_REPORT_EVENT_NAME_LEN);
@@ -113,7 +113,7 @@ namespace perf {
          * @param cat:      Comma separated categories the event belongs to.
          * @param tstart:   Start timestamp of the event.
          * @param tend:     End timestamp of the event.
-         * @param sdfg_id:  SDFG ID of the element associated with this event.
+         * @param cfg_id:   CFG ID of the element associated with this event.
          * @param state_id: State ID of the element associated with this event.
          * @param el_id:    ID of the element associated with this event.
          */
@@ -122,13 +122,13 @@ namespace perf {
             const char *cat,
             unsigned long int tstart,
             unsigned long int tend,
-            int sdfg_id,
+            int cfg_id,
             int state_id,
             int el_id
         ) {
             std::thread::id thread_id = std::this_thread::get_id();
             size_t tid = std::hash<std::thread::id>{}(thread_id);
-            add_completion(name, cat, tstart, tend, tid, sdfg_id, state_id, el_id);
+            add_completion(name, cat, tstart, tend, tid, cfg_id, state_id, el_id);
         }
 
         void add_completion(
@@ -137,7 +137,7 @@ namespace perf {
             unsigned long int tstart,
             unsigned long int tend,
             size_t tid,
-            int sdfg_id,
+            int cfg_id,
             int state_id,
             int el_id
         ) {
@@ -149,7 +149,7 @@ namespace perf {
                 tstart,
                 tend,
                 tid,
-                { sdfg_id, state_id, el_id },
+                { cfg_id, state_id, el_id },
                 { "", 0 }
             };
             strncpy(event.name, name, DACE_REPORT_EVENT_NAME_LEN);
@@ -205,7 +205,7 @@ namespace perf {
                     ofs << "\"tid\": " << event.tid << ", ";
 
                     ofs << "\"args\": {";
-                    ofs << "\"sdfg_id\": " << event.element_id.sdfg_id;
+                    ofs << "\"cfg_id\": " << event.element_id.cfg_id;
 
                     if (event.element_id.state_id > -1) {
                         ofs << ", \"state_id\": ";
diff --git a/dace/sdfg/analysis/cutout.py b/dace/sdfg/analysis/cutout.py
index 94c86bb99c..9d5437dbee 100644
--- a/dace/sdfg/analysis/cutout.py
+++ b/dace/sdfg/analysis/cutout.py
@@ -72,7 +72,7 @@ def translate_transformation_into(self, transformation: Union[PatternTransformat
             old_state = self._base_sdfg.node(transformation.state_id)
             transformation.state_id = self.node_id(self.start_state)
             transformation._sdfg = self
-            transformation.sdfg_id = 0
+            transformation.cfg_id = 0
             for k in transformation.subgraph.keys():
                 old_node = old_state.node(transformation.subgraph[k])
                 try:
@@ -81,10 +81,10 @@ def translate_transformation_into(self, transformation: Union[PatternTransformat
                     # Ignore.
                     pass
         elif isinstance(transformation, MultiStateTransformation):
-            new_sdfg_id = self._in_translation[transformation.sdfg_id]
-            new_sdfg = self.cfg_list[new_sdfg_id]
+            new_cfg_id = self._in_translation[transformation.cfg_id]
+            new_sdfg = self.cfg_list[new_cfg_id]
             transformation._sdfg = new_sdfg
-            transformation.sdfg_id = new_sdfg_id
+            transformation.cfg_id = new_cfg_id
             for k in transformation.subgraph.keys():
                 old_state = self._base_sdfg.node(transformation.subgraph[k])
                 try:
@@ -140,8 +140,8 @@ def from_transformation(
             return cut_sdfg
 
         target_sdfg = sdfg
-        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list is not None:
-            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
+        if transformation.cfg_id >= 0 and target_sdfg.cfg_list is not None:
+            target_sdfg = target_sdfg.cfg_list[transformation.cfg_id]
 
         if (all(isinstance(n, nd.Node) for n in affected_nodes) or
             isinstance(transformation, (SubgraphTransformation, SingleStateTransformation))):
@@ -291,8 +291,8 @@ def singlestate_cutout(cls,
 
         in_translation[state] = new_state
         out_translation[new_state] = state
-        in_translation[sdfg.sdfg_id] = cutout.sdfg_id
-        out_translation[cutout.sdfg_id] = sdfg.sdfg_id
+        in_translation[sdfg.cfg_id] = cutout.cfg_id
+        out_translation[cutout.cfg_id] = sdfg.cfg_id
 
         # Determine what counts as inputs / outputs to the cutout and make those data containers global / non-transient.
         if make_side_effects_global:
@@ -313,7 +313,7 @@ def singlestate_cutout(cls,
         for outer in outers:
             if isinstance(outer, nd.NestedSDFG):
                 inner: nd.NestedSDFG = in_translation[outer]
-                cutout._in_translation[outer.sdfg.sdfg_id] = inner.sdfg.sdfg_id
+                cutout._in_translation[outer.sdfg.cfg_id] = inner.sdfg.cfg_id
         _recursively_set_nsdfg_parents(cutout)
 
         return cutout
@@ -444,8 +444,8 @@ def multistate_cutout(cls,
                 cutout.add_node(new_el, is_start_state=(state == start_state))
                 new_el.parent = cutout
 
-        in_translation[sdfg.sdfg_id] = cutout.sdfg_id
-        out_translation[cutout.sdfg_id] = sdfg.sdfg_id
+        in_translation[sdfg.cfg_id] = cutout.cfg_id
+        out_translation[cutout.cfg_id] = sdfg.cfg_id
 
         # Check interstate edges for missing data descriptors.
         for e in cutout.edges():
@@ -495,8 +495,8 @@ def _transformation_determine_affected_nodes(
     affected_nodes = set()
 
     if isinstance(transformation, PatternTransformation):
-        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list:
-            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
+        if transformation.cfg_id >= 0 and target_sdfg.cfg_list:
+            target_sdfg = target_sdfg.cfg_list[transformation.cfg_id]
 
         for k, _ in transformation._get_pattern_nodes().items():
             try:
@@ -526,8 +526,8 @@ def _transformation_determine_affected_nodes(
             # This is a transformation that affects a nested SDFG node, grab that NSDFG node.
             affected_nodes.add(target_sdfg.parent_nsdfg_node)
     else:
-        if transformation.sdfg_id >= 0 and target_sdfg.cfg_list:
-            target_sdfg = target_sdfg.cfg_list[transformation.sdfg_id]
+        if transformation.cfg_id >= 0 and target_sdfg.cfg_list:
+            target_sdfg = target_sdfg.cfg_list[transformation.cfg_id]
 
         subgraph = transformation.get_subgraph(target_sdfg)
         for n in subgraph.nodes():
@@ -575,7 +575,7 @@ def _reduce_in_configuration(state: SDFGState, affected_nodes: Set[nd.Node], use
     # For the given state, determine what should count as the input configuration if we were to cut out the entire
     # state.
     state_reachability_dict = StateReachability().apply_pass(state.parent, None)
-    state_reach = state_reachability_dict[state.parent.sdfg_id]
+    state_reach = state_reachability_dict[state.parent.cfg_id]
     reaching_cutout: Set[SDFGState] = set()
     for k, v in state_reach.items():
         if state in v:
@@ -900,9 +900,9 @@ def _determine_cutout_reachability(
              set contains the states that can be reached from the cutout.
     """
     if state_reach is None:
-        original_sdfg_id = out_translation[ct.sdfg_id]
-        state_reachability_dict = StateReachability().apply_pass(sdfg.cfg_list[original_sdfg_id], None)
-        state_reach = state_reachability_dict[original_sdfg_id]
+        original_cfg_id = out_translation[ct.cfg_id]
+        state_reachability_dict = StateReachability().apply_pass(sdfg.cfg_list[original_cfg_id], None)
+        state_reach = state_reachability_dict[original_cfg_id]
     inverse_cutout_reach: Set[SDFGState] = set()
     cutout_reach: Set[SDFGState] = set()
     cutout_states = set(ct.states())
diff --git a/dace/sdfg/analysis/schedule_tree/sdfg_to_tree.py b/dace/sdfg/analysis/schedule_tree/sdfg_to_tree.py
index 51871e6512..c10c74f42c 100644
--- a/dace/sdfg/analysis/schedule_tree/sdfg_to_tree.py
+++ b/dace/sdfg/analysis/schedule_tree/sdfg_to_tree.py
@@ -351,7 +351,7 @@ def replace_symbols_until_set(nsdfg: dace.nodes.NestedSDFG):
     """
     mapping = nsdfg.symbol_mapping
     sdfg = nsdfg.sdfg
-    reachable_states = StateReachability().apply_pass(sdfg, {})[sdfg.sdfg_id]
+    reachable_states = StateReachability().apply_pass(sdfg, {})[sdfg.cfg_id]
     redefined_symbols: Dict[SDFGState, Set[str]] = defaultdict(set)
 
     # Collect redefined symbols
diff --git a/dace/sdfg/nodes.py b/dace/sdfg/nodes.py
index b1a95b6e32..a455303326 100644
--- a/dace/sdfg/nodes.py
+++ b/dace/sdfg/nodes.py
@@ -1372,11 +1372,11 @@ def expand(self, sdfg, state, *args, **kwargs) -> str:
         if implementation not in self.implementations.keys():
             raise KeyError("Unknown implementation for node {}: {}".format(type(self).__name__, implementation))
         transformation_type = type(self).implementations[implementation]
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         state_id = sdfg.nodes().index(state)
         subgraph = {transformation_type._match_node: state.node_id(self)}
         transformation: ExpandTransformation = transformation_type()
-        transformation.setup_match(sdfg, sdfg_id, state_id, subgraph, 0)
+        transformation.setup_match(sdfg, cfg_id, state_id, subgraph, 0)
         if not transformation.can_be_applied(state, 0, sdfg):
             raise RuntimeError("Library node expansion applicability check failed.")
         sdfg.append_transformation(transformation)
diff --git a/dace/sdfg/propagation.py b/dace/sdfg/propagation.py
index 18c4d7a192..1c038dd2e4 100644
--- a/dace/sdfg/propagation.py
+++ b/dace/sdfg/propagation.py
@@ -732,7 +732,7 @@ def propagate_states(sdfg, concretize_dynamic_unbounded=False) -> None:
 
     :param sdfg: The SDFG to annotate.
     :param concretize_dynamic_unbounded: If True, we annotate dyncamic unbounded states with symbols of the
-                                         form "num_execs_{sdfg_id}_{loop_start_state_id}". Hence, for each
+                                         form "num_execs_{cfg_id}_{loop_start_state_id}". Hence, for each
                                          unbounded loop its states will have the same number of symbolic executions.
     :note: This operates on the SDFG in-place.
     """
@@ -909,7 +909,7 @@ def propagate_states(sdfg, concretize_dynamic_unbounded=False) -> None:
                             # We can always assume these symbols to be non-negative.
                             traversal_q.append(
                                 (unannotated_loop_edge.dst,
-                                 Symbol(f'num_execs_{sdfg.sdfg_id}_{sdfg.node_id(unannotated_loop_edge.dst)}',
+                                 Symbol(f'num_execs_{sdfg.cfg_id}_{sdfg.node_id(unannotated_loop_edge.dst)}',
                                         nonnegative=True), False, itvar_stack))
                         else:
                             # Propagate dynamic unbounded.
diff --git a/dace/sdfg/utils.py b/dace/sdfg/utils.py
index 1405901802..a62f88a6a2 100644
--- a/dace/sdfg/utils.py
+++ b/dace/sdfg/utils.py
@@ -1215,7 +1215,7 @@ def fuse_states(sdfg: SDFG, permissive: bool = False, progress: bool = None) ->
     start = time.time()
 
     for sd in sdfg.all_sdfgs_recursive():
-        id = sd.sdfg_id
+        id = sd.cfg_id
 
         for cfg in sd.all_control_flow_regions():
             while True:
@@ -1258,8 +1258,8 @@ def inline_loop_blocks(sdfg: SDFG, permissive: bool = False, progress: bool = No
     for _block, _graph in optional_progressbar(reversed(blocks), title='Inlining Loops',
                                                n=len(blocks), progress=progress):
         block: ControlFlowBlock = _block
-        graph: SomeGraphT = _graph
-        id = block.sdfg.sdfg_id
+        graph: GraphT = _graph
+        id = block.sdfg.cfg_id
 
         # We have to reevaluate every time due to changing IDs
         block_id = graph.node_id(block)
@@ -1298,7 +1298,7 @@ def inline_sdfgs(sdfg: SDFG, permissive: bool = False, progress: bool = None, mu
     nsdfgs = [(n, p) for n, p in sdfg.all_nodes_recursive() if isinstance(n, NestedSDFG)]
 
     for node, state in optional_progressbar(reversed(nsdfgs), title='Inlining SDFGs', n=len(nsdfgs), progress=progress):
-        id = node.sdfg.sdfg_id
+        id = node.sdfg.cfg_id
         sd = state.parent
 
         # We have to reevaluate every time due to changing IDs
@@ -1411,7 +1411,7 @@ def unique_node_repr(graph: Union[SDFGState, ScopeSubgraphView], node: Node) ->
     # Build a unique representation
     sdfg = graph.parent
     state = graph if isinstance(graph, SDFGState) else graph._graph
-    return str(sdfg.sdfg_id) + "_" + str(sdfg.node_id(state)) + "_" + str(state.node_id(node))
+    return str(sdfg.cfg_id) + "_" + str(sdfg.node_id(state)) + "_" + str(state.node_id(node))
 
 
 def is_nonfree_sym_dependent(node: nd.AccessNode, desc: dt.Data, state: SDFGState, fsymbols: Set[str]) -> bool:
diff --git a/dace/sdfg/validation.py b/dace/sdfg/validation.py
index 9feda8259c..d05cca009d 100644
--- a/dace/sdfg/validation.py
+++ b/dace/sdfg/validation.py
@@ -827,7 +827,7 @@ def _getlineinfo(self, obj) -> str:
         return f'File "{lineinfo.filename}"'
 
     def to_json(self):
-        return dict(message=self.message, sdfg_id=self.sdfg.sdfg_id, state_id=self.state_id)
+        return dict(message=self.message, cfg_id=self.sdfg.cfg_id, state_id=self.state_id)
 
     def __str__(self):
         if self.state_id is not None:
@@ -860,7 +860,7 @@ def __init__(self, message: str, sdfg: 'SDFG', edge_id: int):
         self.path = None
 
     def to_json(self):
-        return dict(message=self.message, sdfg_id=self.sdfg.sdfg_id, isedge_id=self.edge_id)
+        return dict(message=self.message, cfg_id=self.sdfg.cfg_id, isedge_id=self.edge_id)
 
     def __str__(self):
         if self.edge_id is not None:
@@ -907,7 +907,7 @@ def __init__(self, message: str, sdfg: 'SDFG', state_id: int, node_id: int):
         self.path = None
 
     def to_json(self):
-        return dict(message=self.message, sdfg_id=self.sdfg.sdfg_id, state_id=self.state_id, node_id=self.node_id)
+        return dict(message=self.message, cfg_id=self.sdfg.cfg_id, state_id=self.state_id, node_id=self.node_id)
 
     def __str__(self):
         state = self.sdfg.node(self.state_id)
@@ -952,7 +952,7 @@ def __init__(self, message: str, sdfg: 'SDFG', state_id: int, edge_id: int):
         self.path = None
 
     def to_json(self):
-        return dict(message=self.message, sdfg_id=self.sdfg.sdfg_id, state_id=self.state_id, edge_id=self.edge_id)
+        return dict(message=self.message, cfg_id=self.sdfg.cfg_id, state_id=self.state_id, edge_id=self.edge_id)
 
     def __str__(self):
         state = self.sdfg.node(self.state_id)
diff --git a/dace/sdfg/work_depth_analysis/helpers.py b/dace/sdfg/work_depth_analysis/helpers.py
index e592fd11b5..31d3661509 100644
--- a/dace/sdfg/work_depth_analysis/helpers.py
+++ b/dace/sdfg/work_depth_analysis/helpers.py
@@ -25,18 +25,18 @@ def length(self) -> int:
 UUID_SEPARATOR = '/'
 
 
-def ids_to_string(sdfg_id, state_id=-1, node_id=-1, edge_id=-1):
-    return (str(sdfg_id) + UUID_SEPARATOR + str(state_id) + UUID_SEPARATOR + str(node_id) + UUID_SEPARATOR +
+def ids_to_string(cfg_id, state_id=-1, node_id=-1, edge_id=-1):
+    return (str(cfg_id) + UUID_SEPARATOR + str(state_id) + UUID_SEPARATOR + str(node_id) + UUID_SEPARATOR +
             str(edge_id))
 
 
 def get_uuid(element, state=None):
     if isinstance(element, SDFG):
-        return ids_to_string(element.sdfg_id)
+        return ids_to_string(element.cfg_id)
     elif isinstance(element, SDFGState):
-        return ids_to_string(element.parent.sdfg_id, element.parent.node_id(element))
+        return ids_to_string(element.parent.cfg_id, element.parent.node_id(element))
     elif isinstance(element, nodes.Node):
-        return ids_to_string(state.parent.sdfg_id, state.parent.node_id(state), state.node_id(element))
+        return ids_to_string(state.parent.cfg_id, state.parent.node_id(state), state.node_id(element))
     else:
         return ids_to_string(-1)
 
diff --git a/dace/sourcemap.py b/dace/sourcemap.py
index dcac2b6b73..0f7215bf4d 100644
--- a/dace/sourcemap.py
+++ b/dace/sourcemap.py
@@ -11,13 +11,13 @@
 
 
 class SdfgLocation:
-    def __init__(self, sdfg_id, state_id, node_ids):
-        self.sdfg_id = sdfg_id
+    def __init__(self, cfg_id, state_id, node_ids):
+        self.cfg_id = cfg_id
         self.state_id = state_id
         self.node_ids = node_ids
 
     def printer(self):
-        print("SDFG {}:{}:{}".format(self.sdfg_id, self.state_id, self.node_ids))
+        print("SDFG {}:{}:{}".format(self.cfg_id, self.state_id, self.node_ids))
 
 
 def create_folder(path_str: str):
@@ -204,12 +204,12 @@ def create_mapping(self, node: SdfgLocation, line_num: int):
             :param node: A node which will map to the line number
             :param line_num: The line number to add to the mapping
         """
-        if node.sdfg_id not in self.map:
-            self.map[node.sdfg_id] = {}
-        if node.state_id not in self.map[node.sdfg_id]:
-            self.map[node.sdfg_id][node.state_id] = {}
+        if node.cfg_id not in self.map:
+            self.map[node.cfg_id] = {}
+        if node.state_id not in self.map[node.cfg_id]:
+            self.map[node.cfg_id][node.state_id] = {}
 
-        state = self.map[node.sdfg_id][node.state_id]
+        state = self.map[node.cfg_id][node.state_id]
 
         for node_id in node.node_ids:
             if node_id not in state:
@@ -329,28 +329,28 @@ def sorter(self):
                            'end_line'], n['debuginfo']['end_column'])))
         return db_sorted
 
-    def make_info(self, debuginfo, node_id: int, state_id: int, sdfg_id: int) -> dict:
+    def make_info(self, debuginfo, node_id: int, state_id: int, cfg_id: int) -> dict:
         """ Creates an object for the current node with
             the most important information
 
             :param debuginfo: JSON object of the debuginfo of the node
             :param node_id: ID of the node
             :param state_id: ID of the state
-            :param sdfg_id: ID of the sdfg
+            :param cfg_id: ID of the CFG (control flow graph)
             :return: Dictionary with a debuginfo JSON object and the identifiers
         """
-        return {"debuginfo": debuginfo, "sdfg_id": sdfg_id, "state_id": state_id, "node_id": node_id}
+        return {"debuginfo": debuginfo, "cfg_id": cfg_id, "state_id": state_id, "node_id": node_id}
 
-    def sdfg_debuginfo(self, graph, sdfg_id: int = 0, state_id: int = 0):
+    def sdfg_debuginfo(self, graph, cfg_id: int = 0, state_id: int = 0):
         """ Recursively retracts all debuginfo from the nodes
 
             :param graph: An SDFG or SDFGState to check for nodes
-            :param sdfg_id: Id of the current SDFG/NestedSDFG
+            :param cfg_id: Id of the current SDFG/NestedSDFG
             :param state_id: Id of the current SDFGState
             :return: list of debuginfo with the node identifiers
         """
-        if sdfg_id is None:
-            sdfg_id = 0
+        if cfg_id is None:
+            cfg_id = 0
 
         mapping = []
         for id, node in enumerate(graph.nodes()):
@@ -360,19 +360,19 @@ def sdfg_debuginfo(self, graph, sdfg_id: int = 0, state_id: int = 0):
                 (nodes.AccessNode, nodes.Tasklet, nodes.LibraryNode, nodes.Map)) and node.debuginfo is not None:
 
                 dbinfo = node.debuginfo.to_json()
-                mapping.append(self.make_info(dbinfo, id, state_id, sdfg_id))
+                mapping.append(self.make_info(dbinfo, id, state_id, cfg_id))
 
             elif isinstance(node, (nodes.MapEntry, nodes.MapExit)) and node.map.debuginfo is not None:
                 dbinfo = node.map.debuginfo.to_json()
-                mapping.append(self.make_info(dbinfo, id, state_id, sdfg_id))
+                mapping.append(self.make_info(dbinfo, id, state_id, cfg_id))
 
             # State no debuginfo, recursive call
             elif isinstance(node, state.SDFGState):
-                mapping += self.sdfg_debuginfo(node, sdfg_id, graph.node_id(node))
+                mapping += self.sdfg_debuginfo(node, cfg_id, graph.node_id(node))
 
             # Sdfg not using debuginfo, recursive call
             elif isinstance(node, nodes.NestedSDFG):
-                mapping += self.sdfg_debuginfo(node.sdfg, node.sdfg.sdfg_id, state_id)
+                mapping += self.sdfg_debuginfo(node.sdfg, node.sdfg.cfg_id, state_id)
 
         return mapping
 
@@ -394,7 +394,7 @@ def create_mapping(self, range_dict=None):
                         self.map[src_file][str(line)] = []
 
                     self.map[src_file][str(line)].append({
-                        "sdfg_id": node["sdfg_id"],
+                        "cfg_id": node["cfg_id"],
                         "state_id": node["state_id"],
                         "node_id": node["node_id"]
                     })
diff --git a/dace/transformation/auto/auto_optimize.py b/dace/transformation/auto/auto_optimize.py
index 08d62048b5..60a35c565d 100644
--- a/dace/transformation/auto/auto_optimize.py
+++ b/dace/transformation/auto/auto_optimize.py
@@ -503,7 +503,7 @@ def make_transients_persistent(sdfg: SDFG,
         for aname in (persistent - not_persistent):
             nsdfg.arrays[aname].lifetime = dtypes.AllocationLifetime.Persistent
 
-        result[nsdfg.sdfg_id] = (persistent - not_persistent)
+        result[nsdfg.cfg_id] = (persistent - not_persistent)
 
     if device == dtypes.DeviceType.GPU:
         # Reset nonatomic WCR edges
diff --git a/dace/transformation/dataflow/double_buffering.py b/dace/transformation/dataflow/double_buffering.py
index 6efe6543ca..bb42aa57ac 100644
--- a/dace/transformation/dataflow/double_buffering.py
+++ b/dace/transformation/dataflow/double_buffering.py
@@ -37,7 +37,7 @@ def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
 
         # Verify the map can be transformed to a for-loop
         m2for = MapToForLoop()
-        m2for.setup_match(sdfg, sdfg.sdfg_id, self.state_id,
+        m2for.setup_match(sdfg, sdfg.cfg_id, self.state_id,
                           {MapToForLoop.map_entry: self.subgraph[DoubleBuffering.map_entry]}, expr_index)
         if not m2for.can_be_applied(graph, expr_index, sdfg, permissive):
             return False
@@ -110,7 +110,7 @@ def apply(self, graph: sd.SDFGState, sdfg: sd.SDFG):
         ##############################
         # Turn map into for loop
         map_to_for = MapToForLoop()
-        map_to_for.setup_match(sdfg, self.sdfg_id, self.state_id,
+        map_to_for.setup_match(sdfg, self.cfg_id, self.state_id,
                                {MapToForLoop.map_entry: graph.node_id(self.map_entry)}, self.expr_index)
         nsdfg_node, nstate = map_to_for.apply(graph, sdfg)
 
diff --git a/dace/transformation/dataflow/mapreduce.py b/dace/transformation/dataflow/mapreduce.py
index c24c4d2829..d111cc32b6 100644
--- a/dace/transformation/dataflow/mapreduce.py
+++ b/dace/transformation/dataflow/mapreduce.py
@@ -209,14 +209,14 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
         # To apply, collapse the second map and then fuse the two resulting maps
         map_collapse = MapCollapse()
         map_collapse.setup_match(
-            sdfg, self.sdfg_id, self.state_id, {
+            sdfg, self.cfg_id, self.state_id, {
                 MapCollapse.outer_map_entry: graph.node_id(self.rmap_out_entry),
                 MapCollapse.inner_map_entry: graph.node_id(self.rmap_in_entry),
             }, 0)
         map_entry, _ = map_collapse.apply(graph, sdfg)
 
         map_fusion = MapFusion()
-        map_fusion.setup_match(sdfg, self.sdfg_id, self.state_id, {
+        map_fusion.setup_match(sdfg, self.cfg_id, self.state_id, {
             MapFusion.first_map_exit: graph.node_id(self.tmap_exit),
             MapFusion.second_map_entry: graph.node_id(map_entry),
         }, 0)
diff --git a/dace/transformation/dataflow/mpi.py b/dace/transformation/dataflow/mpi.py
index b6a467dc21..c44c21e9b9 100644
--- a/dace/transformation/dataflow/mpi.py
+++ b/dace/transformation/dataflow/mpi.py
@@ -102,9 +102,9 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
         rangeexpr = str(map_entry.map.range.num_elements())
 
         stripmine_subgraph = {StripMining.map_entry: self.subgraph[MPITransformMap.map_entry]}
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         stripmine = StripMining()
-        stripmine.setup_match(sdfg, sdfg_id, self.state_id, stripmine_subgraph, self.expr_index)
+        stripmine.setup_match(sdfg, cfg_id, self.state_id, stripmine_subgraph, self.expr_index)
         stripmine.dim_idx = -1
         stripmine.new_dim_prefix = "mpi"
         stripmine.tile_size = "(" + rangeexpr + "/__dace_comm_size)"
@@ -128,9 +128,9 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
                 LocalStorage.node_a: graph.node_id(outer_map),
                 LocalStorage.node_b: self.subgraph[MPITransformMap.map_entry]
             }
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             in_local_storage = InLocalStorage()
-            in_local_storage.setup_match(sdfg, sdfg_id, self.state_id, in_local_storage_subgraph, self.expr_index)
+            in_local_storage.setup_match(sdfg, cfg_id, self.state_id, in_local_storage_subgraph, self.expr_index)
             in_local_storage.array = e.data.data
             in_local_storage.apply(graph, sdfg)
 
@@ -146,8 +146,8 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
                 LocalStorage.node_a: graph.node_id(in_map_exit),
                 LocalStorage.node_b: graph.node_id(out_map_exit)
             }
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             outlocalstorage = OutLocalStorage()
-            outlocalstorage.setup_match(sdfg, sdfg_id, self.state_id, outlocalstorage_subgraph, self.expr_index)
+            outlocalstorage.setup_match(sdfg, cfg_id, self.state_id, outlocalstorage_subgraph, self.expr_index)
             outlocalstorage.array = name
             outlocalstorage.apply(graph, sdfg)
diff --git a/dace/transformation/dataflow/reduce_expansion.py b/dace/transformation/dataflow/reduce_expansion.py
index dd93e42654..3f6cc1249b 100644
--- a/dace/transformation/dataflow/reduce_expansion.py
+++ b/dace/transformation/dataflow/reduce_expansion.py
@@ -183,7 +183,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_a: nsdfg.sdfg.nodes()[0].nodes().index(inner_exit),
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(outer_exit)
             }
-            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
+            nsdfg_id = nsdfg.sdfg.cfg_id
             nstate_id = 0
             local_storage = OutLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
@@ -215,7 +215,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(inner_entry)
             }
 
-            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
+            nsdfg_id = nsdfg.sdfg.cfg_id
             nstate_id = 0
             local_storage = InLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
@@ -229,7 +229,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
         # inline fuse back our nested SDFG
         from dace.transformation.interstate import InlineSDFG
         inline_sdfg = InlineSDFG()
-        inline_sdfg.setup_match(sdfg, sdfg.sdfg_id, sdfg.node_id(graph), {InlineSDFG.nested_sdfg: graph.node_id(nsdfg)},
+        inline_sdfg.setup_match(sdfg, sdfg.cfg_id, sdfg.node_id(graph), {InlineSDFG.nested_sdfg: graph.node_id(nsdfg)},
                                 0)
         inline_sdfg.apply(graph, sdfg)
 
diff --git a/dace/transformation/dataflow/tiling.py b/dace/transformation/dataflow/tiling.py
index cd15997ca5..bfa899e71a 100644
--- a/dace/transformation/dataflow/tiling.py
+++ b/dace/transformation/dataflow/tiling.py
@@ -54,7 +54,7 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
         from dace.transformation.dataflow.map_collapse import MapCollapse
         from dace.transformation.dataflow.strip_mining import StripMining
         stripmine_subgraph = {StripMining.map_entry: self.subgraph[MapTiling.map_entry]}
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         last_map_entry = None
         removed_maps = 0
 
@@ -82,7 +82,7 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
                 continue
 
             stripmine = StripMining()
-            stripmine.setup_match(sdfg, sdfg_id, self.state_id, stripmine_subgraph, self.expr_index)
+            stripmine.setup_match(sdfg, cfg_id, self.state_id, stripmine_subgraph, self.expr_index)
 
             # Special case: Tile size of 1 should be omitted from inner map
             if tile_size == 1 and tile_stride == 1 and self.tile_trivial == False:
@@ -113,7 +113,7 @@ def apply(self, graph: SDFGState, sdfg: SDFG):
                     MapCollapse.inner_map_entry: graph.node_id(new_map_entry)
                 }
                 mapcollapse = MapCollapse()
-                mapcollapse.setup_match(sdfg, sdfg_id, self.state_id, mapcollapse_subgraph, 0)
+                mapcollapse.setup_match(sdfg, cfg_id, self.state_id, mapcollapse_subgraph, 0)
                 mapcollapse.apply(graph, sdfg)
             last_map_entry = graph.in_edges(map_entry)[0].src
         return last_map_entry
diff --git a/dace/transformation/interstate/fpga_transform_sdfg.py b/dace/transformation/interstate/fpga_transform_sdfg.py
index 527cc96284..954c88d726 100644
--- a/dace/transformation/interstate/fpga_transform_sdfg.py
+++ b/dace/transformation/interstate/fpga_transform_sdfg.py
@@ -34,7 +34,7 @@ def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
         # Condition match depends on matching FPGATransformState for each state
         for state_id, state in enumerate(sdfg.nodes()):
             fps = FPGATransformState()
-            fps.setup_match(sdfg, graph.sdfg_id, -1, {FPGATransformState.state: state_id}, 0)
+            fps.setup_match(sdfg, graph.cfg_id, -1, {FPGATransformState.state: state_id}, 0)
             if not fps.can_be_applied(sdfg, expr_index, sdfg):
                 return False
 
@@ -45,13 +45,13 @@ def apply(self, _, sdfg):
         from dace.transformation.interstate import NestSDFG
         from dace.transformation.interstate import FPGATransformState
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
         nesting = NestSDFG()
-        nesting.setup_match(sdfg, sdfg_id, -1, {}, self.expr_index)
+        nesting.setup_match(sdfg, cfg_id, -1, {}, self.expr_index)
         nesting.promote_global_trans = self.promote_global_trans
         nesting.apply(sdfg, sdfg)
 
         # The state ID is zero since we applied NestSDFG and have only one state in the new SDFG
         fpga_transform = FPGATransformState()
-        fpga_transform.setup_match(sdfg, sdfg_id, -1, {FPGATransformState.state: 0}, self.expr_index)
+        fpga_transform.setup_match(sdfg, cfg_id, -1, {FPGATransformState.state: 0}, self.expr_index)
         fpga_transform.apply(sdfg, sdfg)
diff --git a/dace/transformation/optimizer.py b/dace/transformation/optimizer.py
index 4cb4997ef4..d1d86d7abf 100644
--- a/dace/transformation/optimizer.py
+++ b/dace/transformation/optimizer.py
@@ -102,11 +102,11 @@ def get_actions(actions, graph, match):
             return actions
 
         def get_dataflow_actions(actions, sdfg, match):
-            graph = sdfg.cfg_list[match.sdfg_id].nodes()[match.state_id]
+            graph = sdfg.cfg_list[match.cfg_id].nodes()[match.state_id]
             return get_actions(actions, graph, match)
 
         def get_stateflow_actions(actions, sdfg, match):
-            graph = sdfg.cfg_list[match.sdfg_id]
+            graph = sdfg.cfg_list[match.cfg_id]
             return get_actions(actions, graph, match)
 
         actions = dict()
@@ -207,7 +207,7 @@ def optimize(self):
             ui_options = sorted(self.get_pattern_matches())
             ui_options_idx = 0
             for pattern_match in ui_options:
-                sdfg = self.sdfg.cfg_list[pattern_match.sdfg_id]
+                sdfg = self.sdfg.cfg_list[pattern_match.cfg_id]
                 pattern_match._sdfg = sdfg
                 print('%d. Transformation %s' % (ui_options_idx, pattern_match.print_match(sdfg)))
                 ui_options_idx += 1
@@ -238,7 +238,7 @@ def optimize(self):
                 break
 
             match_id = (str(occurrence) if pattern_name is None else '%s$%d' % (pattern_name, occurrence))
-            sdfg = self.sdfg.cfg_list[pattern_match.sdfg_id]
+            sdfg = self.sdfg.cfg_list[pattern_match.cfg_id]
             graph = sdfg.node(pattern_match.state_id) if pattern_match.state_id >= 0 else sdfg
             pattern_match._sdfg = sdfg
             print('You selected (%s) pattern %s with parameters %s' %
diff --git a/dace/transformation/passes/analysis.py b/dace/transformation/passes/analysis.py
index d6b235a876..cccfbf10a3 100644
--- a/dace/transformation/passes/analysis.py
+++ b/dace/transformation/passes/analysis.py
@@ -45,7 +45,7 @@ def apply_pass(self, top_sdfg: SDFG, _) -> Dict[int, Dict[SDFGState, Set[SDFGSta
             for n, v in reachable_nodes(sdfg.nx):
                 result[n] = set(v)
 
-            reachable[sdfg.sdfg_id] = result
+            reachable[sdfg.cfg_id] = result
 
         return reachable
 
@@ -130,7 +130,7 @@ def apply_pass(self, top_sdfg: SDFG,
                     edge_readset = oedge.data.read_symbols() - adesc
                     edge_writeset = set(oedge.data.assignments.keys())
                     result[oedge] = (edge_readset, edge_writeset)
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -174,7 +174,7 @@ def apply_pass(self, top_sdfg: SDFG, _) -> Dict[int, Dict[SDFGState, Tuple[Set[s
                     result[e.src][0].update(fsyms)
                     result[e.dst][0].update(fsyms)
 
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -212,7 +212,7 @@ def apply_pass(self, top_sdfg: SDFG, _) -> Dict[int, Dict[str, Set[SDFGState]]]:
                 for access in fsyms:
                     result[access].update({e.src, e.dst})
 
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -248,7 +248,7 @@ def apply_pass(self, top_sdfg: SDFG,
                         result[anode.data][state][1].add(anode)
                     if state.out_degree(anode) > 0:
                         result[anode.data][state][0].add(anode)
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -313,8 +313,8 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[int,
             all_doms = cfg.all_dominators(sdfg, idom)
             symbol_access_sets: Dict[Union[SDFGState, Edge[InterstateEdge]],
                                      Tuple[Set[str],
-                                           Set[str]]] = pipeline_results[SymbolAccessSets.__name__][sdfg.sdfg_id]
-            state_reach: Dict[SDFGState, Set[SDFGState]] = pipeline_results[StateReachability.__name__][sdfg.sdfg_id]
+                                           Set[str]]] = pipeline_results[SymbolAccessSets.__name__][sdfg.cfg_id]
+            state_reach: Dict[SDFGState, Set[SDFGState]] = pipeline_results[StateReachability.__name__][sdfg.cfg_id]
 
             for read_loc, (reads, _) in symbol_access_sets.items():
                 for sym in reads:
@@ -352,7 +352,7 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[int,
             for sym, write in to_remove:
                 del result[sym][write]
 
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -445,10 +445,10 @@ def apply_pass(self, top_sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[i
             idom = nx.immediate_dominators(sdfg.nx, sdfg.start_state)
             all_doms = cfg.all_dominators(sdfg, idom)
             access_sets: Dict[SDFGState, Tuple[Set[str],
-                                               Set[str]]] = pipeline_results[AccessSets.__name__][sdfg.sdfg_id]
+                                               Set[str]]] = pipeline_results[AccessSets.__name__][sdfg.cfg_id]
             access_nodes: Dict[str, Dict[SDFGState, Tuple[Set[nd.AccessNode], Set[nd.AccessNode]]]] = pipeline_results[
-                FindAccessNodes.__name__][sdfg.sdfg_id]
-            state_reach: Dict[SDFGState, Set[SDFGState]] = pipeline_results[StateReachability.__name__][sdfg.sdfg_id]
+                FindAccessNodes.__name__][sdfg.cfg_id]
+            state_reach: Dict[SDFGState, Set[SDFGState]] = pipeline_results[StateReachability.__name__][sdfg.cfg_id]
 
             anames = sdfg.arrays.keys()
             for desc in sdfg.arrays:
@@ -503,7 +503,7 @@ def apply_pass(self, top_sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[i
                                     result[desc][write] = set()
                 for write in to_remove:
                     del result[desc][write]
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -539,7 +539,7 @@ def apply_pass(self, top_sdfg: SDFG, _) -> Dict[int, Dict[str, Set[Memlet]]]:
                         # Find (hopefully propagated) root memlet
                         e = state.memlet_tree(e).root().edge
                         result[anode.data].add(e.data)
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
 
 
@@ -581,5 +581,5 @@ def apply_pass(self, top_sdfg: SDFG, _) -> Dict[int, Dict[str, Set[Union[Memlet,
                         else:
                             # Array -> Reference
                             result[anode.data].add(e.data)
-            top_result[sdfg.sdfg_id] = result
+            top_result[sdfg.cfg_id] = result
         return top_result
diff --git a/dace/transformation/passes/array_elimination.py b/dace/transformation/passes/array_elimination.py
index d1b80c2327..0281b1249e 100644
--- a/dace/transformation/passes/array_elimination.py
+++ b/dace/transformation/passes/array_elimination.py
@@ -41,9 +41,9 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Optional[S
         :return: A set of removed data descriptor names, or None if nothing changed.
         """
         result: Set[str] = set()
-        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results[ap.StateReachability.__name__][sdfg.sdfg_id]
+        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results[ap.StateReachability.__name__][sdfg.cfg_id]
         # Get access nodes and modify set as pass continues
-        access_sets: Dict[str, Set[SDFGState]] = pipeline_results[ap.FindAccessStates.__name__][sdfg.sdfg_id]
+        access_sets: Dict[str, Set[SDFGState]] = pipeline_results[ap.FindAccessStates.__name__][sdfg.cfg_id]
 
         # Traverse SDFG backwards
         try:
@@ -135,7 +135,7 @@ def remove_redundant_views(self, sdfg: SDFG, state: SDFGState, access_nodes: Dic
                 for xform in xforms:
                     # Quick path to setup match
                     candidate = {type(xform).view: anode}
-                    xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
+                    xform.setup_match(sdfg, sdfg.cfg_id, state_id, candidate, 0, override=True)
 
                     # Try to apply
                     if xform.can_be_applied(state, 0, sdfg):
@@ -180,7 +180,7 @@ def remove_redundant_copies(self, sdfg: SDFG, state: SDFGState, removable_data:
                             for xform in xforms_first:
                                 # Quick path to setup match
                                 candidate = {type(xform).in_array: anode, type(xform).out_array: succ}
-                                xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
+                                xform.setup_match(sdfg, sdfg.cfg_id, state_id, candidate, 0, override=True)
 
                                 # Try to apply
                                 if xform.can_be_applied(state, 0, sdfg):
@@ -200,7 +200,7 @@ def remove_redundant_copies(self, sdfg: SDFG, state: SDFGState, removable_data:
                             for xform in xforms_second:
                                 # Quick path to setup match
                                 candidate = {type(xform).in_array: pred, type(xform).out_array: anode}
-                                xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
+                                xform.setup_match(sdfg, sdfg.cfg_id, state_id, candidate, 0, override=True)
 
                                 # Try to apply
                                 if xform.can_be_applied(state, 0, sdfg):
diff --git a/dace/transformation/passes/constant_propagation.py b/dace/transformation/passes/constant_propagation.py
index 9cec6d11af..902cc85b48 100644
--- a/dace/transformation/passes/constant_propagation.py
+++ b/dace/transformation/passes/constant_propagation.py
@@ -129,13 +129,13 @@ def apply_pass(self, sdfg: SDFG, _, initial_symbols: Optional[Dict[str, Any]] =
 
         if self.recursive:
             # Change result to set of tuples
-            sid = sdfg.sdfg_id
+            sid = sdfg.cfg_id
             result = set((sid, sym) for sym in result)
 
             for state in sdfg.nodes():
                 for node in state.nodes():
                     if isinstance(node, nodes.NestedSDFG):
-                        nested_id = node.sdfg.sdfg_id
+                        nested_id = node.sdfg.cfg_id
                         const_syms = {k: v for k, v in node.symbol_mapping.items() if not symbolic.issymbolic(v)}
                         internal = self.apply_pass(node.sdfg, _, const_syms)
                         if internal:
diff --git a/dace/transformation/passes/dead_dataflow_elimination.py b/dace/transformation/passes/dead_dataflow_elimination.py
index d9131385d6..a05557b353 100644
--- a/dace/transformation/passes/dead_dataflow_elimination.py
+++ b/dace/transformation/passes/dead_dataflow_elimination.py
@@ -56,8 +56,8 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Optional[D
         # Depends on the following analysis passes:
         #  * State reachability
         #  * Read/write access sets per state
-        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results['StateReachability'][sdfg.sdfg_id]
-        access_sets: Dict[SDFGState, Tuple[Set[str], Set[str]]] = pipeline_results['AccessSets'][sdfg.sdfg_id]
+        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results['StateReachability'][sdfg.cfg_id]
+        access_sets: Dict[SDFGState, Tuple[Set[str], Set[str]]] = pipeline_results['AccessSets'][sdfg.cfg_id]
         result: Dict[SDFGState, Set[str]] = defaultdict(set)
 
         # Traverse SDFG backwards
diff --git a/dace/transformation/passes/optional_arrays.py b/dace/transformation/passes/optional_arrays.py
index fc31e46cdf..fc0cff5a72 100644
--- a/dace/transformation/passes/optional_arrays.py
+++ b/dace/transformation/passes/optional_arrays.py
@@ -46,7 +46,7 @@ def apply_pass(self,
         result: Set[Tuple[int, str]] = set()
         parent_arrays = parent_arrays or {}
 
-        sdfg_id = sdfg.sdfg_id
+        cfg_id = sdfg.cfg_id
 
         # Set information of arrays based on their transient and parent status
         for aname, arr in sdfg.arrays.items():
@@ -54,11 +54,11 @@ def apply_pass(self,
                 continue
             if arr.transient:
                 if arr.optional is not False:
-                    result.add((sdfg_id, aname))
+                    result.add((cfg_id, aname))
                 arr.optional = False
             if aname in parent_arrays:
                 if arr.optional is not parent_arrays[aname]:
-                    result.add((sdfg_id, aname))
+                    result.add((cfg_id, aname))
                 arr.optional = parent_arrays[aname]
 
         # Change unconditionally-accessed arrays to non-optional
@@ -67,7 +67,7 @@ def apply_pass(self,
                 desc = anode.desc(sdfg)
                 if isinstance(desc, data.Array) and desc.optional is None:
                     desc.optional = False
-                    result.add((sdfg_id, anode.data))
+                    result.add((cfg_id, anode.data))
 
         # Propagate information to nested SDFGs
         for state in sdfg.nodes():
diff --git a/dace/transformation/passes/pattern_matching.py b/dace/transformation/passes/pattern_matching.py
index 3f4d51dd9d..31b68057c3 100644
--- a/dace/transformation/passes/pattern_matching.py
+++ b/dace/transformation/passes/pattern_matching.py
@@ -103,7 +103,7 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Dict[str,
             except StopIteration:
                 continue
 
-            tsdfg = sdfg.cfg_list[match.sdfg_id]
+            tsdfg = sdfg.cfg_list[match.cfg_id]
             graph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
 
             # Set previous pipeline results
@@ -156,7 +156,7 @@ def __init__(self,
     # Helper function for applying and validating a transformation
     def _apply_and_validate(self, match: xf.PatternTransformation, sdfg: SDFG, start: float,
                             pipeline_results: Dict[str, Any], applied_transformations: Dict[str, Any]):
-        tsdfg = sdfg.cfg_list[match.sdfg_id]
+        tsdfg = sdfg.cfg_list[match.cfg_id]
         graph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
 
         # Set previous pipeline results
@@ -377,7 +377,7 @@ def _try_to_match_transformation(graph: Union[SDFG, SDFGState], collapsed_graph:
                 for oname, oval in opts.items():
                     setattr(match, oname, oval)
 
-        match.setup_match(sdfg, sdfg.sdfg_id, state_id, subgraph, expr_idx, options=options)
+        match.setup_match(sdfg, sdfg.cfg_id, state_id, subgraph, expr_idx, options=options)
         match_found = match.can_be_applied(graph, expr_idx, sdfg, permissive=permissive)
     except Exception as e:
         if Config.get_bool('optimizer', 'match_exception'):
diff --git a/dace/transformation/passes/prune_symbols.py b/dace/transformation/passes/prune_symbols.py
index cf55f7a9b2..bff2e1350b 100644
--- a/dace/transformation/passes/prune_symbols.py
+++ b/dace/transformation/passes/prune_symbols.py
@@ -54,7 +54,7 @@ def apply_pass(self, sdfg: SDFG, _) -> Optional[Set[Tuple[int, str]]]:
 
         if self.recursive:
             # Prune nested SDFGs recursively
-            sid = sdfg.sdfg_id
+            sid = sdfg.cfg_id
             result = set((sid, sym) for sym in result)
 
             for state in sdfg.nodes():
diff --git a/dace/transformation/passes/reference_reduction.py b/dace/transformation/passes/reference_reduction.py
index 2af76852ba..21b253d30f 100644
--- a/dace/transformation/passes/reference_reduction.py
+++ b/dace/transformation/passes/reference_reduction.py
@@ -37,9 +37,9 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Optional[S
                                  pipeline, an empty dictionary is expected.
         :return: A set of removed data descriptor names, or None if nothing changed.
         """
-        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results[ap.StateReachability.__name__][sdfg.sdfg_id]
-        access_states: Dict[str, Set[SDFGState]] = pipeline_results[ap.FindAccessStates.__name__][sdfg.sdfg_id]
-        reference_sources: Dict[str, Set[Memlet]] = pipeline_results[ap.FindReferenceSources.__name__][sdfg.sdfg_id]
+        reachable: Dict[SDFGState, Set[SDFGState]] = pipeline_results[ap.StateReachability.__name__][sdfg.cfg_id]
+        access_states: Dict[str, Set[SDFGState]] = pipeline_results[ap.FindAccessStates.__name__][sdfg.cfg_id]
+        reference_sources: Dict[str, Set[Memlet]] = pipeline_results[ap.FindReferenceSources.__name__][sdfg.cfg_id]
 
         # Early exit if no references exist
         if not reference_sources:
diff --git a/dace/transformation/passes/scalar_fission.py b/dace/transformation/passes/scalar_fission.py
index 0a6a272fde..eb8faf33e6 100644
--- a/dace/transformation/passes/scalar_fission.py
+++ b/dace/transformation/passes/scalar_fission.py
@@ -36,7 +36,7 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Optional[D
         """
         results: Dict[str, Set[str]] = defaultdict(lambda: set())
 
-        shadow_scope_dict: ap.WriteScopeDict = pipeline_results[ap.ScalarWriteShadowScopes.__name__][sdfg.sdfg_id]
+        shadow_scope_dict: ap.WriteScopeDict = pipeline_results[ap.ScalarWriteShadowScopes.__name__][sdfg.cfg_id]
 
         for name, write_scope_dict in shadow_scope_dict.items():
             desc = sdfg.arrays[name]
diff --git a/dace/transformation/passes/simplify.py b/dace/transformation/passes/simplify.py
index 1778470b14..2b1411396c 100644
--- a/dace/transformation/passes/simplify.py
+++ b/dace/transformation/passes/simplify.py
@@ -84,7 +84,7 @@ def apply_subpass(self, sdfg: SDFG, p: ppl.Pass, state: Dict[str, Any]):
             for sd in sdfg.all_sdfgs_recursive():
                 subret = p.apply_pass(sd, state)
                 if subret is not None:
-                    ret[sd.sdfg_id] = subret
+                    ret[sd.cfg_id] = subret
             ret = ret or None
         else:
             ret = p.apply_pass(sdfg, state)
diff --git a/dace/transformation/passes/symbol_ssa.py b/dace/transformation/passes/symbol_ssa.py
index eaabc3c743..6f0f4485b0 100644
--- a/dace/transformation/passes/symbol_ssa.py
+++ b/dace/transformation/passes/symbol_ssa.py
@@ -35,7 +35,7 @@ def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[str, Any]) -> Optional[D
         """
         results: Dict[str, Set[str]] = defaultdict(lambda: set())
 
-        symbol_scope_dict: ap.SymbolScopeDict = pipeline_results[ap.SymbolWriteScopes.__name__][sdfg.sdfg_id]
+        symbol_scope_dict: ap.SymbolScopeDict = pipeline_results[ap.SymbolWriteScopes.__name__][sdfg.cfg_id]
 
         for name, scope_dict in symbol_scope_dict.items():
             # If there is only one scope, don't do anything.
diff --git a/dace/transformation/subgraph/composite.py b/dace/transformation/subgraph/composite.py
index ba71b786f8..41d145aaa3 100644
--- a/dace/transformation/subgraph/composite.py
+++ b/dace/transformation/subgraph/composite.py
@@ -67,7 +67,7 @@ def can_be_applied(self, sdfg: SDFG, subgraph: SubgraphView) -> bool:
                 sdfg_copy.reset_cfg_list()
                 graph_copy = sdfg_copy.nodes()[sdfg.nodes().index(graph)]
                 subgraph_copy = SubgraphView(graph_copy, [graph_copy.nodes()[i] for i in graph_indices])
-                expansion.sdfg_id = sdfg_copy.sdfg_id
+                expansion.cfg_id = sdfg_copy.cfg_id
 
                 ##sdfg_copy.apply_transformations(MultiExpansion, states=[graph])
                 #expansion = MultiExpansion()
@@ -107,13 +107,13 @@ def apply(self, sdfg):
 
         if self.allow_expansion:
             expansion = MultiExpansion()
-            expansion.setup_match(subgraph, self.sdfg_id, self.state_id)
+            expansion.setup_match(subgraph, self.cfg_id, self.state_id)
             expansion.permutation_only = not self.expansion_split
             if expansion.can_be_applied(sdfg, subgraph):
                 expansion.apply(sdfg)
 
         sf = SubgraphFusion()
-        sf.setup_match(subgraph, self.sdfg_id, self.state_id)
+        sf.setup_match(subgraph, self.cfg_id, self.state_id)
         if sf.can_be_applied(sdfg, self.subgraph_view(sdfg)):
             # set SubgraphFusion properties
             sf.debug = self.debug
@@ -125,7 +125,7 @@ def apply(self, sdfg):
 
         elif self.allow_tiling == True:
             st = StencilTiling()
-            st.setup_match(subgraph, self.sdfg_id, self.state_id)
+            st.setup_match(subgraph, self.cfg_id, self.state_id)
             if st.can_be_applied(sdfg, self.subgraph_view(sdfg)):
                 # set StencilTiling properties
                 st.debug = self.debug
@@ -136,7 +136,7 @@ def apply(self, sdfg):
                 new_entries = st._outer_entries
                 subgraph = helpers.subgraph_from_maps(sdfg, graph, new_entries)
                 sf = SubgraphFusion()
-                sf.setup_match(subgraph, self.sdfg_id, self.state_id)
+                sf.setup_match(subgraph, self.cfg_id, self.state_id)
                 # set SubgraphFusion properties
                 sf.debug = self.debug
                 sf.transient_allocation = self.transient_allocation
diff --git a/dace/transformation/subgraph/stencil_tiling.py b/dace/transformation/subgraph/stencil_tiling.py
index ab185e4043..6b03b2adba 100644
--- a/dace/transformation/subgraph/stencil_tiling.py
+++ b/dace/transformation/subgraph/stencil_tiling.py
@@ -430,7 +430,7 @@ def apply(self, sdfg):
 
             stripmine_subgraph = {StripMining.map_entry: graph.node_id(map_entry)}
 
-            sdfg_id = sdfg.sdfg_id
+            cfg_id = sdfg.cfg_id
             last_map_entry = None
             original_schedule = map_entry.schedule
             self.tile_sizes = []
@@ -497,7 +497,7 @@ def apply(self, sdfg):
                                map.range[dim_idx][1] - self.tile_offset_upper[-1], map.range[dim_idx][2])
                 map.range[dim_idx] = range_tuple
                 stripmine = StripMining()
-                stripmine.setup_match(sdfg, sdfg_id, self.state_id, stripmine_subgraph, 0)
+                stripmine.setup_match(sdfg, cfg_id, self.state_id, stripmine_subgraph, 0)
 
                 stripmine.tiling_type = dtypes.TilingType.CeilRange
                 stripmine.dim_idx = dim_idx
@@ -538,7 +538,7 @@ def apply(self, sdfg):
                         MapCollapse.inner_map_entry: graph.node_id(new_map_entry)
                     }
                     mapcollapse = MapCollapse()
-                    mapcollapse.setup_match(sdfg, sdfg_id, self.state_id, mapcollapse_subgraph, 0)
+                    mapcollapse.setup_match(sdfg, cfg_id, self.state_id, mapcollapse_subgraph, 0)
                     mapcollapse.apply(graph, sdfg)
                 last_map_entry = graph.in_edges(map_entry)[0].src
             # add last instance of map entries to _outer_entries
@@ -557,7 +557,7 @@ def apply(self, sdfg):
                 if l > 1:
                     subgraph = {MapExpansion.map_entry: graph.node_id(map_entry)}
                     trafo_expansion = MapExpansion()
-                    trafo_expansion.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(graph), subgraph, 0)
+                    trafo_expansion.setup_match(sdfg, sdfg.cfg_id, sdfg.nodes().index(graph), subgraph, 0)
                     trafo_expansion.apply(graph, sdfg)
                 maps = [map_entry]
                 for _ in range(l - 1):
@@ -568,7 +568,7 @@ def apply(self, sdfg):
                     # MapToForLoop
                     subgraph = {MapToForLoop.map_entry: graph.node_id(map)}
                     trafo_for_loop = MapToForLoop()
-                    trafo_for_loop.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(graph), subgraph, 0)
+                    trafo_for_loop.setup_match(sdfg, sdfg.cfg_id, sdfg.nodes().index(graph), subgraph, 0)
                     trafo_for_loop.apply(graph, sdfg)
                     nsdfg = trafo_for_loop.nsdfg
 
diff --git a/dace/transformation/testing.py b/dace/transformation/testing.py
index 00fcf84426..79738c9ec3 100644
--- a/dace/transformation/testing.py
+++ b/dace/transformation/testing.py
@@ -68,7 +68,7 @@ def _optimize_recursive(self, sdfg: SDFG, depth: int):
 
                 print('    ' * depth, type(match).__name__, '- ', end='', file=self.stdout)
 
-                tsdfg: SDFG = new_sdfg.cfg_list[match.sdfg_id]
+                tsdfg: SDFG = new_sdfg.cfg_list[match.cfg_id]
                 tgraph = tsdfg.node(match.state_id) if match.state_id >= 0 else tsdfg
                 match._sdfg = tsdfg
                 match.apply(tgraph, tsdfg)
diff --git a/dace/transformation/transformation.py b/dace/transformation/transformation.py
index 7ad84e8f4d..364a4e7291 100644
--- a/dace/transformation/transformation.py
+++ b/dace/transformation/transformation.py
@@ -63,7 +63,7 @@ class PatternTransformation(TransformationBase):
     """
 
     # Properties
-    sdfg_id = Property(dtype=int, category="(Debug)")
+    cfg_id = Property(dtype=int, category="(Debug)")
     state_id = Property(dtype=int, category="(Debug)")
     _subgraph = DictProperty(key_type=int, value_type=int, category="(Debug)")
     expr_index = Property(dtype=int, category="(Debug)")
@@ -156,7 +156,7 @@ def match_to_str(self, graph: Union[SDFG, SDFGState]) -> str:
 
     def setup_match(self,
                     sdfg: SDFG,
-                    sdfg_id: int,
+                    cfg_id: int,
                     state_id: int,
                     subgraph: Dict['PatternNode', int],
                     expr_index: int,
@@ -165,7 +165,7 @@ def setup_match(self,
         """
         Sets the transformation to a given subgraph pattern.
 
-        :param sdfg_id: A unique ID of the SDFG.
+        :param cfg_id: A unique ID of the control flow graph (CFG).
         :param state_id: The node ID of the SDFG state, if applicable. If
                             transformation does not operate on a single state,
                             the value should be -1.
@@ -184,7 +184,7 @@ def setup_match(self,
         """
 
         self._sdfg = sdfg
-        self.sdfg_id = sdfg_id
+        self.cfg_id = cfg_id
         self.state_id = state_id
         if not override:
             expr = self.expressions()[expr_index]
@@ -224,7 +224,7 @@ def apply_pattern(self, append: bool = True, annotate: bool = True) -> Union[Any
         """
         if append:
             self._sdfg.append_transformation(self)
-        tsdfg: SDFG = self._sdfg.cfg_list[self.sdfg_id]
+        tsdfg: SDFG = self._sdfg.cfg_list[self.cfg_id]
         tgraph = tsdfg.node(self.state_id) if self.state_id >= 0 else tsdfg
         retval = self.apply(tgraph, tsdfg)
         if annotate and not self.annotates_memlets():
@@ -348,7 +348,7 @@ def apply_to(cls,
         # Construct subgraph and instantiate transformation
         subgraph = {required_node_names[k]: graph.node_id(where[k]) for k in required}
         instance = cls()
-        instance.setup_match(sdfg, sdfg.sdfg_id, state_id, subgraph, expr_index)
+        instance.setup_match(sdfg, sdfg.cfg_id, state_id, subgraph, expr_index)
 
         # Construct transformation parameters
         for optname, optval in options.items():
@@ -396,7 +396,7 @@ def from_json(json_obj: Dict[str, Any], context: Dict[str, Any] = None) -> 'Patt
 
         # Reconstruct transformation
         ret = xform()
-        ret.setup_match(None, json_obj.get('sdfg_id', 0), json_obj.get('state_id', 0), subgraph,
+        ret.setup_match(None, json_obj.get('cfg_id', 0), json_obj.get('state_id', 0), subgraph,
                         json_obj.get('expr_index', 0))
         context = context or {}
         context['transformation'] = ret
@@ -658,7 +658,7 @@ def from_json(json_obj: Dict[str, Any], context: Dict[str, Any] = None) -> 'Expa
 
         # Reconstruct transformation
         ret = xform()
-        ret.setup_match(None, json_obj.get('sdfg_id', 0), json_obj.get('state_id', 0), subgraph,
+        ret.setup_match(None, json_obj.get('cfg_id', 0), json_obj.get('state_id', 0), subgraph,
                         json_obj.get('expr_index', 0))
         context = context or {}
         context['transformation'] = ret
@@ -680,22 +680,22 @@ class SubgraphTransformation(TransformationBase):
     class docstring for more information.
     """
 
-    sdfg_id = Property(dtype=int, desc='ID of SDFG to transform')
+    cfg_id = Property(dtype=int, desc='ID of the CFG to transform')
     state_id = Property(dtype=int, desc='ID of state to transform subgraph within, or -1 to transform the '
                         'SDFG')
     subgraph = SetProperty(element_type=int, desc='Subgraph in transformation instance')
 
-    def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], sdfg_id: int = None, state_id: int = None):
+    def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], cfg_id: int = None, state_id: int = None):
         """
         Sets the transformation to a given subgraph.
 
         :param subgraph: A set of node (or state) IDs or a subgraph view object.
-        :param sdfg_id: A unique ID of the SDFG.
+        :param cfg_id: A unique ID of the control flow graph (CFG).
         :param state_id: The node ID of the SDFG state, if applicable. If
                             transformation does not operate on a single state,
                             the value should be -1.
         """
-        if (not isinstance(subgraph, (gr.SubgraphView, SDFG, SDFGState)) and (sdfg_id is None or state_id is None)):
+        if (not isinstance(subgraph, (gr.SubgraphView, SDFG, SDFGState)) and (cfg_id is None or state_id is None)):
             raise TypeError('Subgraph transformation either expects a SubgraphView or a '
                             'set of node IDs, SDFG ID and state ID (or -1).')
 
@@ -710,20 +710,20 @@ def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], sdfg_id: int =
 
             if isinstance(subgraph.graph, SDFGState):
                 sdfg = subgraph.graph.parent
-                self.sdfg_id = sdfg.sdfg_id
+                self.cfg_id = sdfg.cfg_id
                 self.state_id = sdfg.node_id(subgraph.graph)
             elif isinstance(subgraph.graph, SDFG):
-                self.sdfg_id = subgraph.graph.sdfg_id
+                self.cfg_id = subgraph.graph.cfg_id
                 self.state_id = -1
             else:
                 raise TypeError('Unrecognized graph type "%s"' % type(subgraph.graph).__name__)
         else:
             self.subgraph = subgraph
-            self.sdfg_id = sdfg_id
+            self.cfg_id = cfg_id
             self.state_id = state_id
 
     def get_subgraph(self, sdfg: SDFG) -> gr.SubgraphView:
-        sdfg = sdfg.cfg_list[self.sdfg_id]
+        sdfg = sdfg.cfg_list[self.cfg_id]
         if self.state_id == -1:
             return gr.SubgraphView(sdfg, list(map(sdfg.node, self.subgraph)))
         state = sdfg.node(self.state_id)
@@ -748,7 +748,7 @@ def subclasses_recursive(cls) -> Set[Type['PatternTransformation']]:
         return result
 
     def subgraph_view(self, sdfg: SDFG) -> gr.SubgraphView:
-        graph = sdfg.cfg_list[self.sdfg_id]
+        graph = sdfg.cfg_list[self.cfg_id]
         if self.state_id != -1:
             graph = graph.node(self.state_id)
         return gr.SubgraphView(graph, [graph.node(idx) for idx in self.subgraph])
@@ -835,7 +835,7 @@ def apply_to(cls,
             # Construct subgraph and instantiate transformation
             subgraph = gr.SubgraphView(graph, where)
             instance = cls()
-            instance.setup_match(subgraph, sdfg.sdfg_id, state_id)
+            instance.setup_match(subgraph, sdfg.cfg_id, state_id)
         else:
             # Construct instance from subgraph directly
             instance = cls()
@@ -866,7 +866,7 @@ def from_json(json_obj: Dict[str, Any], context: Dict[str, Any] = None) -> 'Subg
 
         # Reconstruct transformation
         ret = xform()
-        ret.setup_match(json_obj.get('subgraph', {}), json_obj.get('sdfg_id', 0), json_obj.get('state_id', 0))
+        ret.setup_match(json_obj.get('subgraph', {}), json_obj.get('cfg_id', 0), json_obj.get('state_id', 0))
         context = context or {}
         context['transformation'] = ret
         serialize.set_properties_from_json(ret, json_obj, context=context, ignore_properties={'transformation', 'type'})
diff --git a/samples/instrumentation/matmul_likwid.py b/samples/instrumentation/matmul_likwid.py
index 9da3d9a5d5..e9d0ae4938 100644
--- a/samples/instrumentation/matmul_likwid.py
+++ b/samples/instrumentation/matmul_likwid.py
@@ -82,7 +82,7 @@ def matmul(A: dace.float32[M, K], B: dace.float32[K, N], C: dace.float32[M, N]):
 #
 # Counter values are grouped by the SDFG element which defines the scope
 # of the intrumentation. Those elements are described as the triplet
-# (sdfg_id, state_id, node_id).
+# (cfg_id, state_id, node_id).
 
 measured_flops = 0
 flops_report = report.counters[(0, 0, -1)]["state_0_0_-1"]["RETIRED_SSE_AVX_FLOPS_SINGLE_ALL"]
diff --git a/tests/codegen/allocation_lifetime_test.py b/tests/codegen/allocation_lifetime_test.py
index 8aff1c83e0..9a68cd2140 100644
--- a/tests/codegen/allocation_lifetime_test.py
+++ b/tests/codegen/allocation_lifetime_test.py
@@ -44,11 +44,11 @@ def _test_determine_alloc(lifetime: dace.AllocationLifetime, unused: bool = Fals
 
 
 def _check_alloc(id, name, codegen, scope):
-    # for sdfg_id, _, node in codegen.to_allocate[scope]:
-    #     if id == sdfg_id and name == node.data:
+    # for cfg_id, _, node in codegen.to_allocate[scope]:
+    #     if id == cfg_id and name == node.data:
     #         return True
     for sdfg, _, node, _, _, _ in codegen.to_allocate[scope]:
-        if sdfg.sdfg_id == id and name == node.data:
+        if sdfg.cfg_id == id and name == node.data:
             return True
     return False
 
diff --git a/tests/parse_state_struct_test.py b/tests/parse_state_struct_test.py
index 58ec2dfd14..c7bdde9448 100644
--- a/tests/parse_state_struct_test.py
+++ b/tests/parse_state_struct_test.py
@@ -85,7 +85,7 @@ def persistent_transient(A: dace.float32[3, 3]):
     state_struct = compiledsdfg.get_state_struct()
 
     # copy the B array into the transient ptr
-    ptr = getattr(state_struct, f'__{sdfg.sdfg_id}_persistent_transient')
+    ptr = getattr(state_struct, f'__{sdfg.cfg_id}_persistent_transient')
     cuda_helper.host_to_gpu(ptr, B.copy())
     result = np.zeros_like(B)
     compiledsdfg(A=A, __return=result)
diff --git a/tests/transformations/subgraph_fusion/block_allreduce_cudatest.py b/tests/transformations/subgraph_fusion/block_allreduce_cudatest.py
index 4a58656332..086dd1d01b 100644
--- a/tests/transformations/subgraph_fusion/block_allreduce_cudatest.py
+++ b/tests/transformations/subgraph_fusion/block_allreduce_cudatest.py
@@ -33,12 +33,12 @@ def test_blockallreduce():
     result1 = csdfg(A=A, M=M, N=N)
     del csdfg
 
-    sdfg_id = 0
+    cfg_id = 0
     state_id = 0
     subgraph = {ReduceExpansion.reduce: graph.node_id(reduce_node)}
     # expand first
     transform = ReduceExpansion()
-    transform.setup_match(sdfg, sdfg_id, state_id, subgraph, 0)
+    transform.setup_match(sdfg, cfg_id, state_id, subgraph, 0)
     transform.reduce_implementation = 'CUDA (block allreduce)'
     transform.apply(sdfg.node(0), sdfg)
     csdfg = sdfg.compile()
diff --git a/tests/transformations/subgraph_fusion/reduction_test.py b/tests/transformations/subgraph_fusion/reduction_test.py
index fa738e9dae..b45fc9a293 100644
--- a/tests/transformations/subgraph_fusion/reduction_test.py
+++ b/tests/transformations/subgraph_fusion/reduction_test.py
@@ -53,7 +53,7 @@ def test_p1(in_transient, out_transient):
             reduce_node = node
 
     rexp = ReduceExpansion()
-    rexp.setup_match(sdfg, sdfg.sdfg_id, 0, {ReduceExpansion.reduce: state.node_id(reduce_node)}, 0)
+    rexp.setup_match(sdfg, sdfg.cfg_id, 0, {ReduceExpansion.reduce: state.node_id(reduce_node)}, 0)
     assert rexp.can_be_applied(state, 0, sdfg) == True
 
     A = np.random.rand(M.get(), N.get()).astype(np.float64)
diff --git a/tests/transformations/subgraph_fusion/util.py b/tests/transformations/subgraph_fusion/util.py
index e16ae68fec..ff535c689a 100644
--- a/tests/transformations/subgraph_fusion/util.py
+++ b/tests/transformations/subgraph_fusion/util.py
@@ -23,7 +23,7 @@ def expand_reduce(sdfg: dace.SDFG,
         for node in sg.nodes():
             if isinstance(node, stdlib.Reduce):
                 rexp = ReduceExpansion()
-                rexp.setup_match(sdfg, sdfg.sdfg_id, sdfg.node_id(graph), {ReduceExpansion.reduce: graph.node_id(node)},
+                rexp.setup_match(sdfg, sdfg.cfg_id, sdfg.node_id(graph), {ReduceExpansion.reduce: graph.node_id(node)},
                                  0)
                 if not rexp.can_be_applied(graph, 0, sdfg):
                     print(f"WARNING: Cannot expand reduce node {node}:" "can_be_applied() failed.")
@@ -31,7 +31,7 @@ def expand_reduce(sdfg: dace.SDFG,
                 reduce_nodes.append(node)
 
         trafo_reduce = ReduceExpansion()
-        trafo_reduce.setup_match(sdfg, sdfg.sdfg_id, sdfg.node_id(graph), {}, 0)
+        trafo_reduce.setup_match(sdfg, sdfg.cfg_id, sdfg.node_id(graph), {}, 0)
         for (property, val) in kwargs.items():
             setattr(trafo_reduce, property, val)
 

From 27a350e3fb12e20ab8b652e95b3d996394216b26 Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 16:24:01 +0100
Subject: [PATCH 6/7] Address review comments, update docs

---
 dace/runtime/include/dace/perf/reporting.h    |  2 +-
 dace/sdfg/sdfg.py                             | 21 ++++++++--
 dace/sdfg/state.py                            | 13 +++++++
 .../dataflow/reduce_expansion.py              |  4 +-
 dace/transformation/passes/optional_arrays.py |  2 +-
 dace/transformation/transformation.py         | 39 ++++++++-----------
 6 files changed, 52 insertions(+), 29 deletions(-)

diff --git a/dace/runtime/include/dace/perf/reporting.h b/dace/runtime/include/dace/perf/reporting.h
index 9b9a59ab09..65d6999205 100644
--- a/dace/runtime/include/dace/perf/reporting.h
+++ b/dace/runtime/include/dace/perf/reporting.h
@@ -113,7 +113,7 @@ namespace perf {
          * @param cat:      Comma separated categories the event belongs to.
          * @param tstart:   Start timestamp of the event.
          * @param tend:     End timestamp of the event.
-         * @param cfg_id:  SDFG ID of the element associated with this event.
+         * @param cfg_id:   Control flow graph ID of the element associated with this event.
          * @param state_id: State ID of the element associated with this event.
          * @param el_id:    ID of the element associated with this event.
          */
diff --git a/dace/sdfg/sdfg.py b/dace/sdfg/sdfg.py
index 484bab8116..0f55817e23 100644
--- a/dace/sdfg/sdfg.py
+++ b/dace/sdfg/sdfg.py
@@ -561,9 +561,9 @@ def __deepcopy__(self, memo):
     @property
     def sdfg_id(self):
         """
-        Returns the unique index of the current SDFG within the current
-        tree of SDFGs (top-level SDFG is 0, nested SDFGs are greater).
-        :note: `sdfg_id` is deprecated, please use `cfg_id` instead.
+        Returns the unique index of the current CFG within the current tree of CFGs (Top-level CFG/SDFG is 0, nested
+        CFGs/SDFGs are greater).
+        :note: ``sdfg_id`` is deprecated, please use ``cfg_id`` instead.
         """
         return self.cfg_id
 
@@ -1110,10 +1110,25 @@ def remove_data(self, name, validate=True):
         del self._arrays[name]
 
     def reset_sdfg_list(self):
+        """
+        Reset the CFG list when changes have been made to the SDFG's CFG tree.
+        This collects all control flow graphs recursively and propagates the collection to all CFGs as the new CFG list.
+        :note: ``reset_sdfg_list`` is deprecated, please use ``reset_cfg_list`` instead.
+
+        :return: The newly updated CFG list.
+        """
         warnings.warn('reset_sdfg_list is deprecated, use reset_cfg_list instead', DeprecationWarning)
         return self.reset_cfg_list()
 
     def update_sdfg_list(self, sdfg_list):
+        """
+        Given a collection of CFGs, add them all to the current SDFG's CFG list.
+        Any CFGs already in the list are skipped, and the newly updated list is propagated across all CFGs in the CFG
+        tree.
+        :note: ``update_sdfg_list`` is deprecated, please use ``update_cfg_list`` instead.
+
+        :param sdfg_list: The collection of CFGs to add to the CFG list.
+        """
         warnings.warn('update_sdfg_list is deprecated, use update_cfg_list instead', DeprecationWarning)
         self.update_cfg_list(sdfg_list)
 
diff --git a/dace/sdfg/state.py b/dace/sdfg/state.py
index 337d2729d8..ea1d03fd39 100644
--- a/dace/sdfg/state.py
+++ b/dace/sdfg/state.py
@@ -2381,6 +2381,12 @@ def __init__(self, label: str='', sdfg: Optional['SDFG'] = None):
         self._cfg_list: List['ControlFlowRegion'] = [self]
 
     def reset_cfg_list(self) -> List['ControlFlowRegion']:
+        """
+        Reset the CFG list when changes have been made to the SDFG's CFG tree.
+        This collects all control flow graphs recursively and propagates the collection to all CFGs as the new CFG list.
+
+        :return: The newly updated CFG list.
+        """
         if isinstance(self, dace.SDFG) and self.parent_sdfg is not None:
             return self.parent_sdfg.reset_cfg_list()
         elif self._parent_graph is not None:
@@ -2393,6 +2399,13 @@ def reset_cfg_list(self) -> List['ControlFlowRegion']:
         return self._cfg_list
 
     def update_cfg_list(self, cfg_list):
+        """
+        Given a collection of CFGs, add them all to the current SDFG's CFG list.
+        Any CFGs already in the list are skipped, and the newly updated list is propagated across all CFGs in the CFG
+        tree.
+
+        :param cfg_list: The collection of CFGs to add to the CFG list.
+        """
         # TODO: Refactor
         sub_cfg_list = self._cfg_list
         for g in cfg_list:
diff --git a/dace/transformation/dataflow/reduce_expansion.py b/dace/transformation/dataflow/reduce_expansion.py
index 3f6cc1249b..7be35b2914 100644
--- a/dace/transformation/dataflow/reduce_expansion.py
+++ b/dace/transformation/dataflow/reduce_expansion.py
@@ -183,7 +183,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_a: nsdfg.sdfg.nodes()[0].nodes().index(inner_exit),
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(outer_exit)
             }
-            nsdfg_id = nsdfg.sdfg.cfg_id
+            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
             nstate_id = 0
             local_storage = OutLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
@@ -215,7 +215,7 @@ def expand(self, sdfg: SDFG, graph: SDFGState, reduce_node):
                 LocalStorage.node_b: nsdfg.sdfg.nodes()[0].nodes().index(inner_entry)
             }
 
-            nsdfg_id = nsdfg.sdfg.cfg_id
+            nsdfg_id = nsdfg.sdfg.cfg_list.index(nsdfg.sdfg)
             nstate_id = 0
             local_storage = InLocalStorage()
             local_storage.setup_match(nsdfg.sdfg, nsdfg_id, nstate_id, local_storage_subgraph, 0)
diff --git a/dace/transformation/passes/optional_arrays.py b/dace/transformation/passes/optional_arrays.py
index fc0cff5a72..e43448415f 100644
--- a/dace/transformation/passes/optional_arrays.py
+++ b/dace/transformation/passes/optional_arrays.py
@@ -41,7 +41,7 @@ def apply_pass(self,
                                  results as ``{Pass subclass name: returned object from pass}``. If not run in a
                                  pipeline, an empty dictionary is expected.
         :param parent_arrays: If not None, contains values of determined arrays from the parent SDFG.
-        :return: A set of the modified array names as a 2-tuple (SDFG ID, name), or None if nothing was changed.
+        :return: A set of the modified array names as a 2-tuple (CFG ID, name), or None if nothing was changed.
         """
         result: Set[Tuple[int, str]] = set()
         parent_arrays = parent_arrays or {}
diff --git a/dace/transformation/transformation.py b/dace/transformation/transformation.py
index 364a4e7291..082a9028f1 100644
--- a/dace/transformation/transformation.py
+++ b/dace/transformation/transformation.py
@@ -211,24 +211,21 @@ def subgraph(self):
 
     def apply_pattern(self, append: bool = True, annotate: bool = True) -> Union[Any, None]:
         """
-        Applies this transformation on the given SDFG, using the transformation
-        instance to find the right SDFG object (based on SDFG ID), and applying
-        memlet propagation as necessary.
-
-        :param sdfg: The SDFG (or an SDFG in the same hierarchy) to apply the
-                     transformation to.
-        :param append: If True, appends the transformation to the SDFG
-                       transformation history.
-        :return: A transformation-defined return value, which could be used
-                 to pass analysis data out, or nothing.
+        Applies this transformation on the given SDFG, using the transformation instance to find the right control flow
+        graph object (based on control flow graph ID), and applying memlet propagation as necessary.
+
+        :param append: If True, appends the transformation to the SDFG transformation history.
+        :param annotate: If True, applies memlet propagation unless the transformation annotates memlets itself.
+        :return: A transformation-defined return value, which could be used to pass analysis data out, or nothing.
         """
         if append:
             self._sdfg.append_transformation(self)
-        tsdfg: SDFG = self._sdfg.cfg_list[self.cfg_id]
-        tgraph = tsdfg.node(self.state_id) if self.state_id >= 0 else tsdfg
+        tcfg = self._sdfg.cfg_list[self.cfg_id]
+        tsdfg = tcfg.sdfg if not isinstance(tcfg, SDFG) else tcfg
+        tgraph = tcfg.node(self.state_id) if self.state_id >= 0 else tcfg
         retval = self.apply(tgraph, tsdfg)
-        if annotate and not self.annotates_memlets():
-            propagation.propagate_memlets_sdfg(tsdfg)
+        if annotate and not self.annotates_memlets(tsdfg):
+            propagation.propagate_memlets_sdfg()
         return retval
 
     def __lt__(self, other: 'PatternTransformation') -> bool:
@@ -680,9 +677,8 @@ class SubgraphTransformation(TransformationBase):
     class docstring for more information.
     """
 
-    cfg_id = Property(dtype=int, desc='ID of SDFG to transform')
-    state_id = Property(dtype=int, desc='ID of state to transform subgraph within, or -1 to transform the '
-                        'SDFG')
+    cfg_id = Property(dtype=int, desc='ID of CFG to transform')
+    state_id = Property(dtype=int, desc='ID of state to transform subgraph within, or -1 to transform the SDFG')
     subgraph = SetProperty(element_type=int, desc='Subgraph in transformation instance')
 
     def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], cfg_id: int = None, state_id: int = None):
@@ -690,14 +686,13 @@ def setup_match(self, subgraph: Union[Set[int], gr.SubgraphView], cfg_id: int =
         Sets the transformation to a given subgraph.
 
         :param subgraph: A set of node (or state) IDs or a subgraph view object.
-        :param cfg_id: A unique ID of the SDFG.
-        :param state_id: The node ID of the SDFG state, if applicable. If
-                            transformation does not operate on a single state,
-                            the value should be -1.
+        :param cfg_id: A unique ID of the CFG.
+        :param state_id: The node ID of the SDFG state, if applicable. If transformation does not operate on a single
+                         state, the value should be -1.
         """
         if (not isinstance(subgraph, (gr.SubgraphView, SDFG, SDFGState)) and (cfg_id is None or state_id is None)):
             raise TypeError('Subgraph transformation either expects a SubgraphView or a '
-                            'set of node IDs, SDFG ID and state ID (or -1).')
+                            'set of node IDs, control flow graph ID and state ID (or -1).')
 
         self._pipeline_results = None
 

From 5aeec96d3da17364bc37516a7813107ba9087b05 Mon Sep 17 00:00:00 2001
From: Philipp Schaad <schaad.phil@gmail.com>
Date: Mon, 29 Jan 2024 17:23:23 +0100
Subject: [PATCH 7/7] Fix blunder

---
 dace/transformation/transformation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dace/transformation/transformation.py b/dace/transformation/transformation.py
index 082a9028f1..8b87939ca8 100644
--- a/dace/transformation/transformation.py
+++ b/dace/transformation/transformation.py
@@ -224,8 +224,8 @@ def apply_pattern(self, append: bool = True, annotate: bool = True) -> Union[Any
         tsdfg = tcfg.sdfg if not isinstance(tcfg, SDFG) else tcfg
         tgraph = tcfg.node(self.state_id) if self.state_id >= 0 else tcfg
         retval = self.apply(tgraph, tsdfg)
-        if annotate and not self.annotates_memlets(tsdfg):
-            propagation.propagate_memlets_sdfg()
+        if annotate and not self.annotates_memlets():
+            propagation.propagate_memlets_sdfg(tsdfg)
         return retval
 
     def __lt__(self, other: 'PatternTransformation') -> bool: