diff --git a/code/__DEFINES/rust_g.dm b/code/__DEFINES/rust_g.dm index 5789f44ffd7..0cd48223ec4 100644 --- a/code/__DEFINES/rust_g.dm +++ b/code/__DEFINES/rust_g.dm @@ -38,12 +38,7 @@ #define RUST_G (__rust_g || __detect_rust_g()) #endif -// Handle 515 call() -> call_ext() changes -#if DM_VERSION >= 515 #define RUSTG_CALL call_ext -#else -#define RUSTG_CALL call -#endif /// Gets the version of rust_g /proc/rustg_get_version() return RUSTG_CALL(RUST_G, "get_version")() diff --git a/code/__HELPERS/icons.dm b/code/__HELPERS/icons.dm index 57404802845..bb03061fef9 100644 --- a/code/__HELPERS/icons.dm +++ b/code/__HELPERS/icons.dm @@ -988,7 +988,7 @@ ColorTone(rgb, tone) if(isicon(icon) && isfile(icon)) //icons compiled in from 'icons/path/to/dmi_file.dmi' at compile time are weird and arent really /icon objects, ///but they pass both isicon() and isfile() checks. theyre the easiest case since stringifying them gives us the path we want - var/icon_ref = "\ref[icon]" + var/icon_ref = text_ref(icon) var/locate_icon_string = "[locate(icon_ref)]" icon_path = locate_icon_string @@ -999,7 +999,7 @@ ColorTone(rgb, tone) // the rsc reference returned by fcopy_rsc() will be stringifiable to "icons/path/to/dmi_file.dmi" var/rsc_ref = fcopy_rsc(icon) - var/icon_ref = "\ref[rsc_ref]" + var/icon_ref = text_ref(rsc_ref) var/icon_path_string = "[locate(icon_ref)]" @@ -1009,7 +1009,7 @@ ColorTone(rgb, tone) var/rsc_ref = fcopy_rsc(icon) //if its the text path of an existing dmi file, the rsc reference returned by fcopy_rsc() will be stringifiable to a dmi path - var/rsc_ref_ref = "\ref[rsc_ref]" + var/rsc_ref_ref = text_ref(rsc_ref) var/rsc_ref_string = "[locate(rsc_ref_ref)]" icon_path = rsc_ref_string diff --git a/code/controllers/master.dm b/code/controllers/master.dm index 11dbcfe93a3..7e34885bc58 100644 --- a/code/controllers/master.dm +++ b/code/controllers/master.dm @@ -593,6 +593,10 @@ GLOBAL_REAL(Master, /datum/controller/master) continue if ((SS_flags & (SS_TICKER|SS_KEEP_TIMING)) == SS_KEEP_TIMING && SS.last_fire + (SS.wait * 0.75) > world.time) continue + if (SS.postponed_fires >= 1) + SS.postponed_fires-- + SS.update_nextfire() + continue SS.enqueue() . 
= 1 @@ -709,15 +713,8 @@ GLOBAL_REAL(Master, /datum/controller/master) queue_node.last_fire = world.time queue_node.times_fired++ - if (queue_node_flags & SS_TICKER) - queue_node.next_fire = world.time + (world.tick_lag * queue_node.wait) - else if (queue_node_flags & SS_POST_FIRE_TIMING) - queue_node.next_fire = world.time + queue_node.wait + (world.tick_lag * (queue_node.tick_overrun * 0.01)) - else if (queue_node_flags & SS_KEEP_TIMING) - queue_node.next_fire += queue_node.wait - else - queue_node.next_fire = queue_node.queued_time + queue_node.wait + (world.tick_lag * (queue_node.tick_overrun * 0.01)) - + queue_node.update_nextfire() + queue_node.queued_time = 0 //remove from queue diff --git a/code/controllers/subsystem.dm b/code/controllers/subsystem.dm index 0a44e8d9930..41704e443d7 100644 --- a/code/controllers/subsystem.dm +++ b/code/controllers/subsystem.dm @@ -60,6 +60,8 @@ var/ticks = 1 ///number of times we have called fire() var/times_fired = 0 + /// How many fires have we been requested to postpone + var/postponed_fires = 0 ///time we entered the queue, (for timing and priority reasons) var/queued_time = 0 ///we keep a running total to make the math easier, if priority changes mid-fire that would break our running total, so we store it here @@ -111,6 +113,29 @@ Master.subsystems -= src return ..() +/** Update next_fire for the next run. + * reset_time (bool) - Ignore things that would normally alter the next fire, like tick_overrun, and last_fire. (also resets postpone) + */ +/datum/controller/subsystem/proc/update_nextfire(reset_time = FALSE) + var/queue_node_flags = flags + + if (reset_time) + postponed_fires = 0 + if (queue_node_flags & SS_TICKER) + next_fire = world.time + (world.tick_lag * wait) + else + next_fire = world.time + wait + return + + if (queue_node_flags & SS_TICKER) + next_fire = world.time + (world.tick_lag * wait) + else if (queue_node_flags & SS_POST_FIRE_TIMING) + next_fire = world.time + wait + (world.tick_lag * (tick_overrun/100)) + else if (queue_node_flags & SS_KEEP_TIMING) + next_fire += wait + else + next_fire = queued_time + wait + (world.tick_lag * (tick_overrun/100)) + ///Queue it to run. /// (we loop thru a linked list until we get to the end or find the right point) /// (this lets us sort our run order correctly without having to re-sort the entire already sorted list) @@ -226,8 +251,8 @@ //could be used to postpone a costly subsystem for (default one) var/cycles, cycles //for instance, during cpu intensive operations like explosions /datum/controller/subsystem/proc/postpone(cycles = 1) - if(next_fire - world.time < wait) - next_fire += (wait*cycles) + if(can_fire && cycles >= 1) + postponed_fires += cycles //usually called via datum/controller/subsystem/New() when replacing a subsystem (i.e. due to a recurring crash) //should attempt to salvage what it can from the old instance of subsystem @@ -238,7 +263,7 @@ if ("can_fire") //this is so the subsystem doesn't rapid fire to make up missed ticks causing more lag if (var_value) - next_fire = world.time + wait + update_nextfire(reset_time = TRUE) if ("queued_priority") //editing this breaks things. return 0 . = ..() diff --git a/code/controllers/subsystem/atoms.dm b/code/controllers/subsystem/atoms.dm index 6b0b114555b..5e2b97bb5a1 100644 --- a/code/controllers/subsystem/atoms.dm +++ b/code/controllers/subsystem/atoms.dm @@ -8,9 +8,10 @@ SUBSYSTEM_DEF(atoms) init_order = INIT_ORDER_ATOMS flags = SS_NO_FIRE - var/old_initialized - /// A count of how many initalize changes we've made. 
We want to prevent old_initialize being overriden by some other value, breaking init code - var/initialized_changed = 0 + /// A stack of list(source, desired initialized state) + /// We read the source of init changes from the last entry, and assert that all changes will come with a reset + var/list/initialized_state = list() + var/base_initialized var/list/late_loaders = list() @@ -47,11 +48,16 @@ SUBSYSTEM_DEF(atoms) if(initialized == INITIALIZATION_INSSATOMS) return - set_tracked_initalized(INITIALIZATION_INNEW_MAPLOAD) + // Generate a unique mapload source for this run of InitializeAtoms + var/static/uid = 0 + uid = (uid + 1) % (SHORT_REAL_LIMIT - 1) + var/source = "subsystem init [uid]" + set_tracked_initalized(INITIALIZATION_INNEW_MAPLOAD, source) // This may look a bit odd, but if the actual atom creation runtimes for some reason, we absolutely need to set initialized BACK - CreateAtoms(atoms, atoms_to_return) - clear_tracked_initalize() + CreateAtoms(atoms, atoms_to_return, source) + clear_tracked_initalize(source) + SSicon_smooth.free_deferred(source) if(length(late_loaders)) for(var/I in 1 to length(late_loaders)) @@ -78,36 +84,54 @@ SUBSYSTEM_DEF(atoms) #endif /// Actually creates the list of atoms. Exists soley so a runtime in the creation logic doesn't cause initalized to totally break -/datum/controller/subsystem/atoms/proc/CreateAtoms(list/atoms, list/atoms_to_return = null) +/datum/controller/subsystem/atoms/proc/CreateAtoms(list/atoms, list/atoms_to_return = null, mapload_source = null) if (atoms_to_return) LAZYINITLIST(created_atoms) + #ifdef TESTING var/count + #endif + var/list/mapload_arg = list(TRUE) if(atoms) + #ifdef TESTING count = length(atoms) - for(var/I in 1 to count) + #endif + + for(var/I in 1 to length(atoms)) var/atom/A = atoms[I] if(A.flags_atom & INITIALIZED) continue - CHECK_TICK + // Unrolled CHECK_TICK setup to let us enable/disable mapload based off source + if(TICK_CHECK) + clear_tracked_initalize(mapload_source) + stoplag() + if(mapload_source) + set_tracked_initalized(INITIALIZATION_INNEW_MAPLOAD, mapload_source) PROFILE_INIT_ATOM_BEGIN() InitAtom(A, TRUE, mapload_arg) PROFILE_INIT_ATOM_END(A) else + #ifdef TESTING count = 0 - for(var/atom/A in world) + #endif + for(var/atom/A as anything in world) if(A.flags_atom & INITIALIZED) continue PROFILE_INIT_ATOM_BEGIN() InitAtom(A, FALSE, mapload_arg) PROFILE_INIT_ATOM_END(A) + #ifdef TESTING ++count - CHECK_TICK + #endif + if(TICK_CHECK) + clear_tracked_initalize(mapload_source) + stoplag() + if(mapload_source) + set_tracked_initalized(INITIALIZATION_INNEW_MAPLOAD, mapload_source) testing("Initialized [count] atoms") - pass(count) /// Init this specific atom /datum/controller/subsystem/atoms/proc/InitAtom(atom/A, from_template = FALSE, list/arguments) @@ -116,30 +140,33 @@ SUBSYSTEM_DEF(atoms) BadInitializeCalls[the_type] |= BAD_INIT_QDEL_BEFORE return TRUE + // This is handled and battle tested by dreamchecker. Limit to UNIT_TESTS just in case that ever fails. 
+ #ifdef UNIT_TESTS var/start_tick = world.time + #endif var/result = A.Initialize(arglist(arguments)) + #ifdef UNIT_TESTS if(start_tick != world.time) BadInitializeCalls[the_type] |= BAD_INIT_SLEPT + #endif var/qdeleted = FALSE - if(result != INITIALIZE_HINT_NORMAL) - switch(result) - if(INITIALIZE_HINT_LATELOAD) - if(arguments[1]) //mapload - late_loaders += A - else - A.LateInitialize() - if(INITIALIZE_HINT_QDEL) - qdel(A) - qdeleted = TRUE - if(INITIALIZE_HINT_QDEL_FORCE) - qdel(A, force = TRUE) - qdeleted = TRUE + switch(result) + if(INITIALIZE_HINT_NORMAL) + EMPTY_BLOCK_GUARD // pass + if(INITIALIZE_HINT_LATELOAD) + if(arguments[1]) //mapload + late_loaders += A else - BadInitializeCalls[the_type] |= BAD_INIT_NO_HINT + A.LateInitialize() + if(INITIALIZE_HINT_QDEL) + qdel(A) + qdeleted = TRUE + else + BadInitializeCalls[the_type] |= BAD_INIT_NO_HINT if(!A) //possible harddel qdeleted = TRUE @@ -152,31 +179,46 @@ SUBSYSTEM_DEF(atoms) return qdeleted || QDELING(A) -/datum/controller/subsystem/atoms/proc/map_loader_begin() - set_tracked_initalized(INITIALIZATION_INSSATOMS) - -/datum/controller/subsystem/atoms/proc/map_loader_stop() - clear_tracked_initalize() - -/// Use this to set initialized to prevent error states where old_initialized is overriden. It keeps happening and it's cheesing me off -/datum/controller/subsystem/atoms/proc/set_tracked_initalized(value) - if(!initialized_changed) - old_initialized = initialized - initialized = value - else - stack_trace("We started maploading while we were already maploading. You doing something odd?") - initialized_changed += 1 - -/datum/controller/subsystem/atoms/proc/clear_tracked_initalize() - initialized_changed -= 1 - if(!initialized_changed) - initialized = old_initialized +/datum/controller/subsystem/atoms/proc/map_loader_begin(source) + set_tracked_initalized(INITIALIZATION_INSSATOMS, source) + +/datum/controller/subsystem/atoms/proc/map_loader_stop(source) + clear_tracked_initalize(source) + +/// Returns the source currently modifying SSatom's init behavior +/datum/controller/subsystem/atoms/proc/get_initialized_source() + var/state_length = length(initialized_state) + if(!state_length) + return null + return initialized_state[state_length][1] + +/// Use this to set initialized to prevent error states where the old initialized is overriden, and we end up losing all context +/// Accepts a state and a source, the most recent state is used, sources exist to prevent overriding old values accidentially +/datum/controller/subsystem/atoms/proc/set_tracked_initalized(state, source) + if(!length(initialized_state)) + base_initialized = initialized + initialized_state += list(list(source, state)) + initialized = state + +/datum/controller/subsystem/atoms/proc/clear_tracked_initalize(source) + if(!length(initialized_state)) + return + for(var/i in length(initialized_state) to 1 step -1) + if(initialized_state[i][1] == source) + initialized_state.Cut(i, i+1) + break + + if(!length(initialized_state)) + initialized = base_initialized + base_initialized = INITIALIZATION_INNEW_REGULAR + return + initialized = initialized_state[length(initialized_state)][2] /datum/controller/subsystem/atoms/Recover() initialized = SSatoms.initialized if(initialized == INITIALIZATION_INNEW_MAPLOAD) InitializeAtoms() - old_initialized = SSatoms.old_initialized + initialized_state = SSatoms.initialized_state BadInitializeCalls = SSatoms.BadInitializeCalls /datum/controller/subsystem/atoms/proc/InitLog() @@ -189,7 +231,7 @@ SUBSYSTEM_DEF(atoms) if(fails & 
BAD_INIT_NO_HINT) . += "- Didn't return an Initialize hint\n" if(fails & BAD_INIT_QDEL_BEFORE) - . += "- Qdel'd in New()\n" + . += "- Qdel'd before Initialize proc ran\n" if(fails & BAD_INIT_SLEPT) . += "- Slept during Initialize()\n" diff --git a/code/controllers/subsystem/garbage.dm b/code/controllers/subsystem/garbage.dm index d524bf29803..40101f164a1 100644 --- a/code/controllers/subsystem/garbage.dm +++ b/code/controllers/subsystem/garbage.dm @@ -161,7 +161,10 @@ SUBSYSTEM_DEF(garbage) lastlevel = level - //We do this rather then for(var/refID in queue) because that sort of for loop copies the whole list. +// 1 from the hard reference in the queue, and 1 from the variable used before this +#define REFS_WE_EXPECT 2 + + //We do this rather then for(var/list/ref_info in queue) because that sort of for loop copies the whole list. //Normally this isn't expensive, but the gc queue can grow to 40k items, and that gets costly/causes overrun. for (var/i in 1 to length(queue)) var/list/L = queue[i] @@ -175,20 +178,16 @@ SUBSYSTEM_DEF(garbage) if(queued_at_time > cut_off_time) break // Everything else is newer, skip them count++ - var/GCd_at_time = L[GC_QUEUE_ITEM_GCD_DESTROYED] - - var/refID = L[GC_QUEUE_ITEM_REF] - var/datum/D - D = locate(refID) - if (!D || D.gc_destroyed != GCd_at_time) // So if something else coincidently gets the same ref, it's not deleted by mistake + var/datum/D = L[GC_QUEUE_ITEM_REF] + if(refcount(D) == REFS_WE_EXPECT) ++gcedlasttick ++totalgcs pass_counts[level]++ #ifdef REFERENCE_TRACKING - reference_find_on_fail -= refID //It's deleted we don't care anymore. + reference_find_on_fail -= text_ref(D) //It's deleted we don't care anymore. #endif - if (MC_TICK_CHECK) + if(MC_TICK_CHECK) return continue @@ -202,15 +201,18 @@ SUBSYSTEM_DEF(garbage) switch (level) if (GC_QUEUE_CHECK) #ifdef REFERENCE_TRACKING - if(reference_find_on_fail[refID]) - INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references)) + // Decides how many refs to look for (potentially) + // Based off the remaining and the ones we can account for + var/remaining_refs = refcount(D) - REFS_WE_EXPECT + if(reference_find_on_fail[text_ref(D)]) + INVOKE_ASYNC(D, TYPE_PROC_REF(/datum, find_references), remaining_refs) ref_searching = TRUE #ifdef GC_FAILURE_HARD_LOOKUP else - INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references)) + INVOKE_ASYNC(D, TYPE_PROC_REF(/datum, find_references), remaining_refs) ref_searching = TRUE #endif - reference_find_on_fail -= refID + reference_find_on_fail -= text_ref(D) #endif var/type = D.type var/datum/qdel_item/I = items[type] @@ -231,7 +233,7 @@ SUBSYSTEM_DEF(garbage) return //ref searching intentionally cancels all further fires while running so things that hold references don't end up getting deleted, so we want to return here instead of continue #endif continue - if (GC_QUEUE_HARDDELETE) + if(GC_QUEUE_HARDDELETE) HardDelete(D) if (MC_TICK_CHECK) return @@ -246,25 +248,26 @@ SUBSYSTEM_DEF(garbage) if (MC_TICK_CHECK) return - if (count) + if(count) queue.Cut(1,count+1) count = 0 +#undef REFS_WE_EXPECT + /datum/controller/subsystem/garbage/proc/Queue(datum/D, level = GC_QUEUE_FILTER) - if (isnull(D)) + if(isnull(D)) return - if (level > GC_QUEUE_COUNT) + if(level > GC_QUEUE_COUNT) HardDelete(D) return var/queue_time = world.time - var/refid = text_ref(D) - if (D.gc_destroyed <= 0) + if(D.gc_destroyed <= 0) D.gc_destroyed = queue_time var/list/queue = queues[level] - queue[++queue.len] = list(queue_time, refid, D.gc_destroyed) // not += for byond reasons + queue[++queue.len] = 
list(queue_time, D, D.gc_destroyed) // not += for byond reasons //this is mainly to separate things profile wise. /datum/controller/subsystem/garbage/proc/HardDelete(datum/D) @@ -325,75 +328,77 @@ SUBSYSTEM_DEF(garbage) /datum/qdel_item/New(mytype) name = "[mytype]" - /// Should be treated as a replacement for the 'del' keyword. /// /// Datums passed to this will be given a chance to clean up references to allow the GC to collect them. -/proc/qdel(datum/D, force=FALSE) - if(!istype(D)) - del(D) +/proc/qdel(datum/to_delete, force = FALSE) + if(!istype(to_delete)) + del(to_delete) return - var/datum/qdel_item/I = SSgarbage.items[D.type] - if (!I) - I = SSgarbage.items[D.type] = new /datum/qdel_item(D.type) - I.qdels++ + var/datum/qdel_item/trash = SSgarbage.items[to_delete.type] + if(!trash) + trash = SSgarbage.items[to_delete.type] = new /datum/qdel_item(to_delete.type) + trash.qdels++ - if(isnull(D.gc_destroyed)) - if (SEND_SIGNAL(D, COMSIG_PREQDELETED, force)) // Give the components a chance to prevent their parent from being deleted - return - D.gc_destroyed = GC_CURRENTLY_BEING_QDELETED - var/start_time = world.time - var/start_tick = world.tick_usage - SEND_SIGNAL(D, COMSIG_QDELETING, force) // Let the (remaining) components know about the result of Destroy - var/hint = D.Destroy(force) // Let our friend know they're about to get fucked up. - if(world.time != start_time) - I.slept_destroy++ - else - I.destroy_time += TICK_USAGE_TO_MS(start_tick) - if(!D) + if(!isnull(to_delete.gc_destroyed)) + if(to_delete.gc_destroyed == GC_CURRENTLY_BEING_QDELETED) + CRASH("[to_delete.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic") + return + + if(SEND_SIGNAL(to_delete, COMSIG_PREQDELETED, force)) // Give the components a chance to prevent their parent from being deleted + return + + to_delete.gc_destroyed = GC_CURRENTLY_BEING_QDELETED + var/start_time = world.time + var/start_tick = world.tick_usage + SEND_SIGNAL(to_delete, COMSIG_QDELETING, force) // Let the (remaining) components know about the result of Destroy + var/hint = to_delete.Destroy(force) // Let our friend know they're about to get fucked up. + if(world.time != start_time) + trash.slept_destroy++ + else + trash.destroy_time += TICK_USAGE_TO_MS(start_tick) + if(isnull(to_delete)) + return + switch(hint) + if(QDEL_HINT_QUEUE) //qdel should queue the object for deletion. + SSgarbage.Queue(to_delete) + if(QDEL_HINT_IWILLGC) + to_delete.gc_destroyed = world.time return - switch(hint) - if (QDEL_HINT_QUEUE) //qdel should queue the object for deletion. - SSgarbage.Queue(D) - if (QDEL_HINT_IWILLGC) - D.gc_destroyed = world.time + if(QDEL_HINT_LETMELIVE) //qdel should let the object live after calling destory. + if(!force) + to_delete.gc_destroyed = null //clear the gc variable (important!) return - if (QDEL_HINT_LETMELIVE) //qdel should let the object live after calling destory. - if(!force) - D.gc_destroyed = null //clear the gc variable (important!) - return - // Returning LETMELIVE after being told to force destroy - // indicates the objects Destroy() does not respect force - #ifdef TESTING - if(!I.no_respect_force) - testing("WARNING: [D.type] has been force deleted, but is \ - returning an immortal QDEL_HINT, indicating it does \ - not respect the force flag for qdel(). 
It has been \ - placed in the queue, further instances of this type \ - will also be queued.") - #endif - I.no_respect_force++ + // Returning LETMELIVE after being told to force destroy + // indicates the objects Destroy() does not respect force + #ifdef TESTING + if(!trash.no_respect_force) + testing("WARNING: [to_delete.type] has been force deleted, but is \ + returning an immortal QDEL_HINT, indicating it does \ + not respect the force flag for qdel(). It has been \ + placed in the queue, further instances of this type \ + will also be queued.") + #endif + trash.no_respect_force++ - SSgarbage.Queue(D) - if (QDEL_HINT_HARDDEL) //qdel should assume this object won't gc, and queue a hard delete - SSgarbage.Queue(D, GC_QUEUE_HARDDELETE) - if (QDEL_HINT_HARDDEL_NOW) //qdel should assume this object won't gc, and hard del it post haste. - SSgarbage.HardDelete(D) - #ifdef REFERENCE_TRACKING - if (QDEL_HINT_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion. - SSgarbage.Queue(D) - D.find_references() //This breaks ci. Consider it insurance against somehow pring reftracking on accident - if (QDEL_HINT_IFFAIL_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled and the object fails to collect, display all references to this object. - SSgarbage.Queue(D) - SSgarbage.reference_find_on_fail[text_ref(D)] = TRUE + SSgarbage.Queue(to_delete) + if(QDEL_HINT_HARDDEL) //qdel should assume this object won't gc, and queue a hard delete + SSgarbage.Queue(to_delete, GC_QUEUE_HARDDELETE) + if(QDEL_HINT_HARDDEL_NOW) //qdel should assume this object won't gc, and hard del it post haste. + SSgarbage.HardDelete(to_delete) + #ifdef REFERENCE_TRACKING + if(QDEL_HINT_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion. + SSgarbage.Queue(to_delete) + to_delete.find_references() //This breaks ci. Consider it insurance against somehow pring reftracking on accident + if(QDEL_HINT_IFFAIL_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled and the object fails to collect, display all references to this object. + SSgarbage.Queue(to_delete) + SSgarbage.reference_find_on_fail[text_ref(to_delete)] = TRUE + #endif + else + #ifdef TESTING + if(!trash.no_hint) + testing("WARNING: [to_delete.type] is not returning a qdel hint. It is being placed in the queue. Further instances of this type will also be queued.") #endif - else - #ifdef TESTING - if(!I.no_hint) - testing("WARNING: [D.type] is not returning a qdel hint. It is being placed in the queue. 
Further instances of this type will also be queued.") - #endif - I.no_hint++ - SSgarbage.Queue(D) - else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED) - CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic") + trash.no_hint++ + SSgarbage.Queue(to_delete) diff --git a/code/controllers/subsystem/icon_smooth.dm b/code/controllers/subsystem/icon_smooth.dm index d4ddfeff7e0..0584d62c23c 100644 --- a/code/controllers/subsystem/icon_smooth.dm +++ b/code/controllers/subsystem/icon_smooth.dm @@ -9,6 +9,7 @@ SUBSYSTEM_DEF(icon_smooth) var/list/blueprint_queue = list() var/list/smooth_queue = list() var/list/deferred = list() + var/list/deferred_by_source = list() /datum/controller/subsystem/icon_smooth/Initialize() smooth_zlevel(1, TRUE) @@ -56,16 +57,30 @@ SUBSYSTEM_DEF(icon_smooth) else can_fire = FALSE +/// Releases a pool of delayed smooth attempts from a particular source +/datum/controller/subsystem/icon_smooth/proc/free_deferred(source_to_free) + smooth_queue += deferred_by_source[source_to_free] + deferred_by_source -= source_to_free + if(!can_fire) + can_fire = TRUE /datum/controller/subsystem/icon_smooth/proc/add_to_queue(atom/thing) if(thing.smoothing_flags & SMOOTH_QUEUED) return thing.smoothing_flags |= SMOOTH_QUEUED + // If we're currently locked into mapload BY something + // Then put us in a deferred list that we release when this mapload run is finished + if(initialized && length(SSatoms.initialized_state) && SSatoms.initialized == INITIALIZATION_INNEW_MAPLOAD) + var/source = SSatoms.get_initialized_source() + LAZYADD(deferred_by_source[source], thing) + return smooth_queue += thing if(!can_fire) can_fire = TRUE /datum/controller/subsystem/icon_smooth/proc/remove_from_queues(atom/thing) + // Lack of removal from deferred_by_source is safe because the lack of SMOOTH_QUEUED will just free it anyway + // Hopefully this'll never cause a harddel (dies) thing.smoothing_flags &= ~SMOOTH_QUEUED smooth_queue -= thing blueprint_queue -= thing diff --git a/code/controllers/subsystem/timer.dm b/code/controllers/subsystem/timer.dm index 35143d6f8cb..5f0b630fe25 100644 --- a/code/controllers/subsystem/timer.dm +++ b/code/controllers/subsystem/timer.dm @@ -4,8 +4,6 @@ #define BUCKET_POS(timer) (((round((timer.timeToRun - timer.timer_subsystem.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN) /// Gets the maximum time at which timers will be invoked from buckets, used for deferring to secondary queue #define TIMER_MAX(timer_ss) (timer_ss.head_offset + TICKS2DS(BUCKET_LEN + timer_ss.practical_offset - 1)) -/// Max float with integer precision -#define TIMER_ID_MAX (2**24) /** * # Timer Subsystem @@ -656,4 +654,3 @@ SUBSYSTEM_DEF(timer) #undef BUCKET_LEN #undef BUCKET_POS #undef TIMER_MAX -#undef TIMER_ID_MAX diff --git a/code/datums/datum.dm b/code/datums/datum.dm index f7061192117..d257a149d01 100644 --- a/code/datums/datum.dm +++ b/code/datums/datum.dm @@ -67,8 +67,12 @@ #endif #ifdef REFERENCE_TRACKING - var/running_find_references + /// When was this datum last touched by a reftracker? 
+ /// If this value doesn't match with the start of the search + /// We know this datum has never been seen before, and we should check it var/last_find_references = 0 + /// How many references we're trying to find when searching + var/references_to_clear = 0 #ifdef REFERENCE_TRACKING_DEBUG ///Stores info about where refs are found, used for sanity checks and testing var/list/found_refs diff --git a/code/game/objects/machinery/computer/camera.dm b/code/game/objects/machinery/computer/camera.dm index 1b2a948e1eb..39d56b6924d 100644 --- a/code/game/objects/machinery/computer/camera.dm +++ b/code/game/objects/machinery/computer/camera.dm @@ -1,3 +1,5 @@ +#define DEFAULT_MAP_SIZE 15 + /obj/machinery/computer/camera name = "security camera console" desc = "Used to access the various cameras on the station." @@ -15,7 +17,6 @@ // Stuff needed to render the map var/map_name - var/const/default_map_size = 15 var/atom/movable/screen/map_view/cam_screen /// All the plane masters that need to be applied. var/list/cam_plane_masters @@ -51,9 +52,9 @@ cam_background.del_on_map_removal = FALSE /obj/machinery/computer/camera/Destroy() - qdel(cam_screen) + QDEL_NULL(cam_screen) QDEL_LIST(cam_plane_masters) - qdel(cam_background) + QDEL_NULL(cam_background) return ..() /obj/machinery/computer/camera/connect_to_shuttle(obj/docking_port/mobile/port, obj/docking_port/stationary/dock, idnum, override=FALSE) @@ -157,7 +158,7 @@ /obj/machinery/computer/camera/proc/show_camera_static() cam_screen.vis_contents.Cut() cam_background.icon_state = "scanline2" - cam_background.fill_rect(1, 1, default_map_size, default_map_size) + cam_background.fill_rect(1, 1, DEFAULT_MAP_SIZE, DEFAULT_MAP_SIZE) // Returns the list of cameras accessible from this computer /obj/machinery/computer/camera/proc/get_available_cameras() @@ -177,3 +178,5 @@ if(length(tempnetwork)) valid_cams["[C.c_tag]"] = C return valid_cams + +#undef DEFAULT_MAP_SIZE diff --git a/code/game/objects/objs.dm b/code/game/objects/objs.dm index b8fa0441f96..92934f26e53 100644 --- a/code/game/objects/objs.dm +++ b/code/game/objects/objs.dm @@ -222,6 +222,8 @@ return /mob/proc/set_machine(obj/O) + if(QDELETED(src) || QDELETED(O)) + return if(machine) unset_machine() machine = O diff --git a/code/game/objects/structures/dropship_equipment.dm b/code/game/objects/structures/dropship_equipment.dm index 1be2e7a184f..eceb4cb2580 100644 --- a/code/game/objects/structures/dropship_equipment.dm +++ b/code/game/objects/structures/dropship_equipment.dm @@ -359,12 +359,16 @@ if(!deployed_turret) var/obj/new_gun = new sentry_type(src) deployed_turret = new_gun.loc - RegisterSignal(deployed_turret, COMSIG_OBJ_DECONSTRUCT, PROC_REF(clean_refs)) + RegisterSignal(deployed_turret, COMSIG_QDELETING, PROC_REF(clean_refs)) + +/obj/structure/dropship_equipment/shuttle/sentry_holder/Destroy() + deployed_turret = null + return ..() ///This cleans the deployed_turret ref when the sentry is destroyed. 
/obj/structure/dropship_equipment/shuttle/sentry_holder/proc/clean_refs(atom/source, disassembled) SIGNAL_HANDLER - UnregisterSignal(deployed_turret, COMSIG_OBJ_DECONSTRUCT) + UnregisterSignal(deployed_turret, COMSIG_QDELETING) deployed_turret = null dropship_equipment_flags &= ~IS_NOT_REMOVABLE diff --git a/code/modules/admin/view_variables/reference_tracking.dm b/code/modules/admin/view_variables/reference_tracking.dm index e4801e8722b..e351dffe2e2 100644 --- a/code/modules/admin/view_variables/reference_tracking.dm +++ b/code/modules/admin/view_variables/reference_tracking.dm @@ -1,34 +1,30 @@ #ifdef REFERENCE_TRACKING +#define REFSEARCH_RECURSE_LIMIT 64 -/datum/proc/find_references(skip_alert) - running_find_references = type +/datum/proc/find_references(references_to_clear = INFINITY) if(usr?.client) - if(usr.client.running_find_references) - log_reftracker("CANCELLED search for references to a [usr.client.running_find_references].") - usr.client.running_find_references = null - running_find_references = null - //restart the garbage collector - SSgarbage.can_fire = TRUE - SSgarbage.next_fire = world.time + world.tick_lag - return - - if(!skip_alert && tgui_alert(usr,"Running this will lock everything up for about 5 minutes. Would you like to begin the search?", "Find References", list("Yes", "No")) != "Yes") - running_find_references = null + if(tgui_alert(usr, "Running this will lock everything up for about 5 minutes. Would you like to begin the search?", "Find References", list("Yes", "No")) != "Yes") return + src.references_to_clear = references_to_clear //this keeps the garbage collector from failing to collect objects being searched for in here SSgarbage.can_fire = FALSE - if(usr?.client) - usr.client.running_find_references = type + _search_references() + //restart the garbage collector + SSgarbage.can_fire = TRUE + SSgarbage.update_nextfire(reset_time = TRUE) - log_reftracker("Beginning search for references to a [type].") +/datum/proc/_search_references() + log_reftracker("Beginning search for references to a [type], looking for [references_to_clear] refs.") var/starting_time = world.time //Time to search the whole game for our ref - DoSearchVar(GLOB, "GLOB", search_time = starting_time) //globals + DoSearchVar(GLOB, "GLOB", starting_time) //globals log_reftracker("Finished searching globals") + if(src.references_to_clear == 0) + return //Yes we do actually need to do this. The searcher refuses to read weird lists //And global.vars is a really weird list @@ -36,44 +32,46 @@ for(var/key in global.vars) global_vars[key] = global.vars[key] - DoSearchVar(global_vars, "Native Global", search_time = starting_time) + DoSearchVar(global_vars, "Native Global", starting_time) log_reftracker("Finished searching native globals") + if(src.references_to_clear == 0) + return for(var/datum/thing in world) //atoms (don't beleive its lies) - DoSearchVar(thing, "World -> [thing.type]", search_time = starting_time) + DoSearchVar(thing, "World -> [thing.type]", starting_time) + if(src.references_to_clear == 0) + break log_reftracker("Finished searching atoms") + if(src.references_to_clear == 0) + return for(var/datum/thing) //datums - DoSearchVar(thing, "Datums -> [thing.type]", search_time = starting_time) + DoSearchVar(thing, "Datums -> [thing.type]", starting_time) + if(src.references_to_clear == 0) + break log_reftracker("Finished searching datums") + if(src.references_to_clear == 0) + return //Warning, attempting to search clients like this will cause crashes if done on live. 
Watch yourself #ifndef REFERENCE_DOING_IT_LIVE for(var/client/thing) //clients - DoSearchVar(thing, "Clients -> [thing.type]", search_time = starting_time) + DoSearchVar(thing, "Clients -> [thing.type]", starting_time) + if(src.references_to_clear == 0) + break log_reftracker("Finished searching clients") + if(src.references_to_clear == 0) + return #endif log_reftracker("Completed search for references to a [type].") - if(usr?.client) - usr.client.running_find_references = null - running_find_references = null - - //restart the garbage collector - SSgarbage.can_fire = TRUE - SSgarbage.next_fire = world.time + world.tick_lag - -/datum/proc/DoSearchVar(potential_container, container_name, recursive_limit = 64, search_time = world.time) - #ifdef REFERENCE_TRACKING_DEBUG - if(SSgarbage.should_save_refs && !found_refs) - found_refs = list() - #endif - - if(usr?.client && !usr.client.running_find_references) +/datum/proc/DoSearchVar(potential_container, container_name, search_time, recursion_count, is_special_list) + if(recursion_count >= REFSEARCH_RECURSE_LIMIT) + log_reftracker("Recursion limit reached. [container_name]") return - if(!recursive_limit) + if(references_to_clear == 0) log_reftracker("Recursion limit reached. [container_name]") return @@ -88,73 +86,117 @@ return datum_container.last_find_references = search_time - var/container_print = datum_container.ref_search_details() var/list/vars_list = datum_container.vars + var/is_atom = FALSE + var/is_area = FALSE + if(isatom(datum_container)) + is_atom = TRUE + if(isarea(datum_container)) + is_area = TRUE + for(var/varname in vars_list) - #ifndef FIND_REF_NO_CHECK_TICK - CHECK_TICK - #endif - if (varname == "vars" || varname == "vis_locs") //Fun fact, vis_locs don't count for references - continue var/variable = vars_list[varname] - if(variable == src) + if(islist(variable)) + //Fun fact, vis_locs don't count for references + if(varname == "vars" || (is_atom && (varname == "vis_locs" || varname == "overlays" || varname == "underlays" || varname == "filters" || varname == "verbs" || (is_area && varname == "contents")))) + continue + // We do this after the varname check to avoid area contents (reading it incures a world loop's worth of cost) + if(!length(variable)) + continue + DoSearchVar(variable,\ + "[container_name] [datum_container.ref_search_details()] -> [varname] (list)",\ + search_time,\ + recursion_count + 1,\ + /*is_special_list = */ is_atom && (varname == "contents" || varname == "vis_contents" || varname == "locs")) + else if(variable == src) #ifdef REFERENCE_TRACKING_DEBUG if(SSgarbage.should_save_refs) + if(!found_refs) + found_refs = list() found_refs[varname] = TRUE continue //End early, don't want these logging + else + log_reftracker("Found [type] [text_ref(src)] in [datum_container.type]'s [datum_container.ref_search_details()] [varname] var. [container_name]") + #else + log_reftracker("Found [type] [text_ref(src)] in [datum_container.type]'s [datum_container.ref_search_details()] [varname] var. [container_name]") #endif - log_reftracker("Found [type] \ref[src] in [datum_container.type]'s [container_print] [varname] var. 
[container_name]") + references_to_clear -= 1 + if(references_to_clear == 0) + log_reftracker("All references to [type] [text_ref(src)] found, exiting.") + return continue - if(islist(variable)) - DoSearchVar(variable, "[container_name] [container_print] -> [varname] (list)", recursive_limit - 1, search_time) - else if(islist(potential_container)) - var/normal = IS_NORMAL_LIST(potential_container) var/list/potential_cache = potential_container for(var/element_in_list in potential_cache) - #ifndef FIND_REF_NO_CHECK_TICK - CHECK_TICK - #endif + //Check normal sublists + if(islist(element_in_list)) + if(length(element_in_list)) + DoSearchVar(element_in_list, "[container_name] -> [element_in_list] (list)", search_time, recursion_count + 1) //Check normal entrys - if(element_in_list == src) + else if(element_in_list == src) #ifdef REFERENCE_TRACKING_DEBUG if(SSgarbage.should_save_refs) + if(!found_refs) + found_refs = list() found_refs[potential_cache] = TRUE - continue //End early, don't want these logging - #endif + continue + else + log_reftracker("Found [type] [text_ref(src)] in list [container_name].") + #else log_reftracker("Found [type] \ref[src] in list [container_name].") - continue - - var/assoc_val = null - if(!isnum(element_in_list) && normal) - assoc_val = potential_cache[element_in_list] - //Check assoc entrys - if(assoc_val == src) - #ifdef REFERENCE_TRACKING_DEBUG - if(SSgarbage.should_save_refs) - found_refs[potential_cache] = TRUE - continue //End early, don't want these logging #endif - log_reftracker("Found [type] \ref[src] in list [container_name]\[[element_in_list]\]") - continue - //We need to run both of these checks, since our object could be hiding in either of them - //Check normal sublists - if(islist(element_in_list)) - DoSearchVar(element_in_list, "[container_name] -> [element_in_list] (list)", recursive_limit - 1, search_time) - //Check assoc sublists - if(islist(assoc_val)) - DoSearchVar(potential_container[element_in_list], "[container_name]\[[element_in_list]\] -> [assoc_val] (list)", recursive_limit - 1, search_time) - -/proc/qdel_and_find_ref_if_fail(datum/thing_to_del, force = FALSE) - thing_to_del.qdel_and_find_ref_if_fail(force) - -/datum/proc/qdel_and_find_ref_if_fail(force = FALSE) - SSgarbage.reference_find_on_fail[text_ref(src)] = TRUE - qdel(src, force) - + // This is dumb as hell I'm sorry + // I don't want the garbage subsystem to count as a ref for the purposes of this number + // If we find all other refs before it I want to early exit, and if we don't I want to keep searching past it + var/ignore_ref = FALSE + var/list/queues = SSgarbage.queues + for(var/list/queue in queues) + if(potential_cache in queue) + ignore_ref = TRUE + break + if(ignore_ref) + log_reftracker("[container_name] does not count as a ref for our count") + else + references_to_clear -= 1 + if(references_to_clear == 0) + log_reftracker("All references to [type] [text_ref(src)] found, exiting.") + return + + if(!isnum(element_in_list) && !is_special_list) + // This exists to catch an error that throws when we access a special list + // is_special_list is a hint, it can be wrong + try + var/assoc_val = potential_cache[element_in_list] + //Check assoc sublists + if(islist(assoc_val)) + if(length(assoc_val)) + DoSearchVar(potential_container[element_in_list], "[container_name]\[[element_in_list]\] -> [assoc_val] (list)", search_time, recursion_count + 1) + //Check assoc entry + else if(assoc_val == src) + #ifdef REFERENCE_TRACKING_DEBUG + if(SSgarbage.should_save_refs) + 
if(!found_refs) + found_refs = list() + found_refs[potential_cache] = TRUE + continue + else + log_reftracker("Found [type] [text_ref(src)] in list [container_name]\[[element_in_list]\]") + #else + log_reftracker("Found [type] [text_ref(src)] in list [container_name]\[[element_in_list]\]") + #endif + references_to_clear -= 1 + if(references_to_clear == 0) + log_reftracker("All references to [type] [text_ref(src)] found, exiting.") + return + catch + // So if it goes wrong we kill it + is_special_list = TRUE + log_reftracker("Curiosity: [container_name] lead to an error when acessing [element_in_list], what is it?") + +#undef REFSEARCH_RECURSE_LIMIT #endif // Kept outside the ifdef so overrides are easy to implement diff --git a/code/modules/clothing/modular_armor/attachments.dm b/code/modules/clothing/modular_armor/attachments.dm index 2f1f9de34ef..9e9b8d697b7 100644 --- a/code/modules/clothing/modular_armor/attachments.dm +++ b/code/modules/clothing/modular_armor/attachments.dm @@ -67,6 +67,10 @@ AddComponent(/datum/component/attachment_handler, attachments_by_slot, attachments_allowed, starting_attachments = starting_attachments) update_icon() +/obj/item/armor_module/Destroy() + parent = null + return ..() + /// Called before a module is attached. /obj/item/armor_module/proc/can_attach(obj/item/attaching_to, mob/user) return TRUE diff --git a/code/modules/mapping/reader.dm b/code/modules/mapping/reader.dm index 5c031adda34..81c22fc7a28 100644 --- a/code/modules/mapping/reader.dm +++ b/code/modules/mapping/reader.dm @@ -335,7 +335,7 @@ first_turf_index++ //turn off base new Initialization until the whole thing is loaded - SSatoms.map_loader_begin() + SSatoms.map_loader_begin(REF(src)) //instanciate the first /turf var/turf/T if(members[first_turf_index] != /turf/template_noop) @@ -354,7 +354,7 @@ for(index in 1 to first_turf_index-1) instance_atom(members[index], members_attributes[index], crds, no_changeturf, placeOnTop, delete) //Restore initialization to the previous value - SSatoms.map_loader_stop() + SSatoms.map_loader_stop(REF(src)) //////////////// //Helpers procs @@ -384,9 +384,9 @@ //custom CHECK_TICK here because we don't want things created while we're sleeping to not initialize if(TICK_CHECK) - SSatoms.map_loader_stop() + SSatoms.map_loader_stop(REF(src)) stoplag() - SSatoms.map_loader_begin() + SSatoms.map_loader_begin(REF(src)) /datum/parsed_map/proc/create_atom(path, crds) set waitfor = FALSE diff --git a/code/modules/mob/mob.dm b/code/modules/mob/mob.dm index d911392db80..5ab1d372e7e 100644 --- a/code/modules/mob/mob.dm +++ b/code/modules/mob/mob.dm @@ -1,5 +1,9 @@ - -/mob/Destroy()//This makes sure that mobs with clients/keys are not just deleted from the game. 
+/mob/Destroy() + if(client) + stack_trace("Mob with client has been deleted.") + else if(ckey) + stack_trace("Mob without client but with associated ckey has been deleted.") + unset_machine() GLOB.mob_list -= src GLOB.dead_mob_list -= src GLOB.offered_mob_list -= src diff --git a/code/modules/predator/yautja/bracers.dm b/code/modules/predator/yautja/bracers.dm index 72778460c37..81dba336a05 100644 --- a/code/modules/predator/yautja/bracers.dm +++ b/code/modules/predator/yautja/bracers.dm @@ -72,7 +72,16 @@ var/minimap_icon = "predator" COOLDOWN_DECLARE(bracer_recharge) -/obj/item/clothing/gloves/yautja/Destroy() +/obj/item/clothing/gloves/yautja/Destroy() // FUCKING SHITCODE + left_wristblades = null + right_wristblades = null + combistick = null + discs.Cut() + real_owner = null + owner = null + QDEL_NULL(caster) + QDEL_NULL(embedded_id) + QDEL_LIST(actions_to_add) STOP_PROCESSING(SSobj, src) if(linked_bracer) linked_bracer.linked_bracer = null @@ -1033,11 +1042,6 @@ left_wristblades = new(src) right_wristblades = new(src) -/obj/item/clothing/gloves/yautja/hunter/Destroy() - . = ..() - left_wristblades = null - right_wristblades = null - /obj/item/clothing/gloves/yautja/hunter/emp_act(severity) charge = max(charge - (severity * 500), 0) if(ishuman(loc)) @@ -1058,11 +1062,6 @@ if(embedded_id?.registered_name) embedded_id.set_user_data(user) -/obj/item/clothing/gloves/yautja/hunter/Destroy() - QDEL_NULL(caster) - QDEL_NULL(embedded_id) - return ..() - /obj/item/clothing/gloves/yautja/hunter/process() if(!ishuman(loc)) STOP_PROCESSING(SSobj, src) diff --git a/code/modules/projectiles/attachables/_attachable.dm b/code/modules/projectiles/attachables/_attachable.dm index 09965c6f055..baa15e3eff4 100644 --- a/code/modules/projectiles/attachables/_attachable.dm +++ b/code/modules/projectiles/attachables/_attachable.dm @@ -120,7 +120,6 @@ inaccurate. Don't worry if force is ever negative, it won't runtime. ///only used by lace, denotes whether the lace is currently deployed var/lace_deployed = FALSE - ///what ability to give the user when attached to a weapon they are holding. var/attachment_action_type ///used for the codex to denote if a weapon has the ability to zoom in or not. 
diff --git a/code/modules/projectiles/gun_system.dm b/code/modules/projectiles/gun_system.dm index 47d050c03ed..657b3f66776 100644 --- a/code/modules/projectiles/gun_system.dm +++ b/code/modules/projectiles/gun_system.dm @@ -401,6 +401,7 @@ INVOKE_ASYNC(src, PROC_REF(fill_gun)) /obj/item/weapon/gun/Destroy() + master_gun = null active_attachable = null gunattachment = null QDEL_NULL(muzzle_flash) diff --git a/code/modules/reqs/ui/vehicle.dm b/code/modules/reqs/ui/vehicle.dm index 8b08be0395f..c69bb673d52 100644 --- a/code/modules/reqs/ui/vehicle.dm +++ b/code/modules/reqs/ui/vehicle.dm @@ -12,11 +12,11 @@ GLOBAL_LIST_EMPTY(purchased_tanks) vehtype = new vehtype GLOB.armored_modtypes[vehtype.type] = vehtype.permitted_mods .[vehtype.type] = vehtype.permitted_weapons - qdel(vehtype) + vehtype.Destroy() for(var/obj/item/armored_weapon/gun AS in typesof(/obj/item/armored_weapon)) gun = new gun GLOB.armored_gunammo[gun.type] = gun.accepted_ammo - qdel(gun) + gun.Destroy() /datum/supply_ui/vehicles tgui_name = "VehicleSupply" diff --git a/code/modules/tgs/v3210/api.dm b/code/modules/tgs/v3210/api.dm index 666201a3225..aaf9671db7b 100644 --- a/code/modules/tgs/v3210/api.dm +++ b/code/modules/tgs/v3210/api.dm @@ -99,11 +99,7 @@ if(skip_compat_check && !fexists(SERVICE_INTERFACE_DLL)) TGS_ERROR_LOG("Service parameter present but no interface DLL detected. This is symptomatic of running a service less than version 3.1! Please upgrade.") return - #if DM_VERSION >= 515 call_ext(SERVICE_INTERFACE_DLL, SERVICE_INTERFACE_FUNCTION)(instance_name, command) //trust no retval - #else - call(SERVICE_INTERFACE_DLL, SERVICE_INTERFACE_FUNCTION)(instance_name, command) //trust no retval - #endif return TRUE /datum/tgs_api/v3210/OnTopic(T) diff --git a/code/modules/unit_tests/create_and_destroy.dm b/code/modules/unit_tests/create_and_destroy.dm index a3dc08624f6..e34fc2f552b 100644 --- a/code/modules/unit_tests/create_and_destroy.dm +++ b/code/modules/unit_tests/create_and_destroy.dm @@ -155,7 +155,7 @@ GLOBAL_VAR_INIT(running_create_and_destroy, FALSE) if(fails & BAD_INIT_NO_HINT) Fail("[path] didn't return an Initialize hint") if(fails & BAD_INIT_QDEL_BEFORE) - Fail("[path] qdel'd in New()") + Fail("[path] qdel'd before we could call Initialize()") if(fails & BAD_INIT_SLEPT) Fail("[path] slept during Initialize()") diff --git a/code/modules/unit_tests/find_reference_sanity.dm b/code/modules/unit_tests/find_reference_sanity.dm index 3f2addd2315..a1637f7ea85 100644 --- a/code/modules/unit_tests/find_reference_sanity.dm +++ b/code/modules/unit_tests/find_reference_sanity.dm @@ -15,6 +15,8 @@ return ..() /atom/movable/ref_test + // Gotta make sure we do a full check + references_to_clear = INFINITY var/atom/movable/ref_test/self_ref /atom/movable/ref_test/Destroy(force) @@ -29,9 +31,9 @@ //Sanity check var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 3, "Should be: test references: 0 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(testbed, "Sanity Check", search_time = 1) //We increment search time to get around an optimization + victim.DoSearchVar(testbed, "Sanity Check") //We increment search time to get around an optimization - TEST_ASSERT(!victim.found_refs.len, "The ref-tracking tool found a ref where none existed") + TEST_ASSERT(!LAZYLEN(victim.found_refs), "The ref-tracking tool found a ref where none existed") SSgarbage.should_save_refs = FALSE /datum/unit_test/find_reference_baseline/Run() @@ -46,11 +48,11 @@ var/refcount = refcount(victim) 
TEST_ASSERT_EQUAL(refcount, 6, "Should be: test references: 3 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(testbed, "First Run", search_time = 2) + victim.DoSearchVar(testbed, "First Run") - TEST_ASSERT(victim.found_refs["test"], "The ref-tracking tool failed to find a regular value") - TEST_ASSERT(victim.found_refs[testbed.test_list], "The ref-tracking tool failed to find a list entry") - TEST_ASSERT(victim.found_refs[testbed.test_assoc_list], "The ref-tracking tool failed to find an assoc list value") + TEST_ASSERT(LAZYACCESS(victim.found_refs, "test"), "The ref-tracking tool failed to find a regular value") + TEST_ASSERT(LAZYACCESS(victim.found_refs, testbed.test_list), "The ref-tracking tool failed to find a list entry") + TEST_ASSERT(LAZYACCESS(victim.found_refs, testbed.test_assoc_list), "The ref-tracking tool failed to find an assoc list value") SSgarbage.should_save_refs = FALSE /datum/unit_test/find_reference_exotic/Run() @@ -65,12 +67,12 @@ var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 6, "Should be: test references: 3 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(testbed, "Second Run", search_time = 3) + victim.DoSearchVar(testbed, "Second Run") //This is another sanity check - TEST_ASSERT(!victim.found_refs[testbed.overlays], "The ref-tracking tool found an overlays entry? That shouldn't be possible") - TEST_ASSERT(victim.found_refs[testbed.vis_contents], "The ref-tracking tool failed to find a vis_contents entry") - TEST_ASSERT(victim.found_refs[testbed.test_assoc_list], "The ref-tracking tool failed to find an assoc list key") + TEST_ASSERT(!LAZYACCESS(victim.found_refs, testbed.overlays), "The ref-tracking tool found an overlays entry? That shouldn't be possible") + TEST_ASSERT(LAZYACCESS(victim.found_refs, testbed.vis_contents), "The ref-tracking tool failed to find a vis_contents entry") + TEST_ASSERT(LAZYACCESS(victim.found_refs, testbed.test_assoc_list), "The ref-tracking tool failed to find an assoc list key") SSgarbage.should_save_refs = FALSE /datum/unit_test/find_reference_esoteric/Run() @@ -87,11 +89,11 @@ var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 6, "Should be: test references: 3 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(victim, "Third Run Self", search_time = 4) - victim.DoSearchVar(testbed, "Third Run Testbed", search_time = 4) - TEST_ASSERT(victim.found_refs["self_ref"], "The ref-tracking tool failed to find a self reference") - TEST_ASSERT(victim.found_refs[to_find], "The ref-tracking tool failed to find a nested list entry") - TEST_ASSERT(victim.found_refs[to_find_assoc], "The ref-tracking tool failed to find a nested assoc list entry") + victim.DoSearchVar(victim, "Third Run Self") + victim.DoSearchVar(testbed, "Third Run Testbed") + TEST_ASSERT(LAZYACCESS(victim.found_refs, "self_ref"), "The ref-tracking tool failed to find a self reference") + TEST_ASSERT(LAZYACCESS(victim.found_refs, to_find), "The ref-tracking tool failed to find a nested list entry") + TEST_ASSERT(LAZYACCESS(victim.found_refs, to_find_assoc), "The ref-tracking tool failed to find a nested assoc list entry") SSgarbage.should_save_refs = FALSE /datum/unit_test/find_reference_null_key_entry/Run() @@ -103,7 +105,7 @@ testbed.test_assoc_list = list(null = victim) var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 4, "Should be: test references: 1 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(testbed, "Fourth Run", 
search_time = 5) + victim.DoSearchVar(testbed, "Fourth Run") TEST_ASSERT(testbed.test_assoc_list, "The ref-tracking tool failed to find a null key'd assoc list entry") @@ -120,10 +122,10 @@ var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 5, "Should be: test references: 2 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(testbed, "Fifth Run", search_time = 6) + victim.DoSearchVar(testbed, "Fifth Run") - TEST_ASSERT(victim.found_refs[to_find_in_key], "The ref-tracking tool failed to find a nested assoc list key") - TEST_ASSERT(victim.found_refs[to_find_null_assoc_nested], "The ref-tracking tool failed to find a null key'd nested assoc list entry") + TEST_ASSERT(LAZYACCESS(victim.found_refs, to_find_in_key), "The ref-tracking tool failed to find a nested assoc list key") + TEST_ASSERT(LAZYACCESS(victim.found_refs, to_find_null_assoc_nested), "The ref-tracking tool failed to find a null key'd nested assoc list entry") SSgarbage.should_save_refs = FALSE /datum/unit_test/find_reference_static_investigation/Run() @@ -143,7 +145,7 @@ var/refcount = refcount(victim) TEST_ASSERT_EQUAL(refcount, 5, "Should be: test references: 2 + baseline references: 3 (victim var,loc,allocated list)") - victim.DoSearchVar(global_vars, "Sixth Run", search_time = 7) + victim.DoSearchVar(global_vars, "Sixth Run") - TEST_ASSERT(victim.found_refs[global_vars], "The ref-tracking tool failed to find a natively global variable") + TEST_ASSERT(LAZYACCESS(victim.found_refs, global_vars), "The ref-tracking tool failed to find a natively global variable") SSgarbage.should_save_refs = FALSE diff --git a/code/modules/vehicles/armored/interiors/chairs.dm b/code/modules/vehicles/armored/interiors/chairs.dm index 1645d53a854..fb258e0b115 100644 --- a/code/modules/vehicles/armored/interiors/chairs.dm +++ b/code/modules/vehicles/armored/interiors/chairs.dm @@ -58,6 +58,10 @@ var/obj/vehicle/sealed/armored/owner buildstacktype = null +/obj/structure/bed/chair/vehicle_gunner_seat/Destroy() + owner = null + return ..() + /obj/structure/bed/chair/vehicle_gunner_seat/link_interior(datum/interior/link) if(!istype(link, /datum/interior/armored)) CRASH("invalid interior [link.type] passed to [name]") @@ -96,6 +100,10 @@ var/obj/vehicle/sealed/armored/owner buildstacktype = null +/obj/structure/bed/chair/driver_gunner_seat/Destroy() + owner = null + return ..() + /obj/structure/bed/chair/driver_gunner_seat/link_interior(datum/interior/link) if(!istype(link, /datum/interior/armored)) CRASH("invalid interior [link.type] passed to [name]") diff --git a/code/modules/xenomorph/xeno_towers.dm b/code/modules/xenomorph/xeno_towers.dm index 483910ec978..299a725b7a0 100644 --- a/code/modules/xenomorph/xeno_towers.dm +++ b/code/modules/xenomorph/xeno_towers.dm @@ -70,7 +70,7 @@ SSminimaps.add_marker(src, MINIMAP_FLAG_XENO, image('icons/UI_icons/map_blips.dmi', null, "phero", ABOVE_FLOAT_LAYER)) // RU TGMC edit - map blips GLOB.hive_datums[hivenumber].pherotowers += src -//Pheromone towers start off with recovery. + //Pheromone towers start off with recovery. current_aura = SSaura.add_emitter(src, AURA_XENO_RECOVERY, aura_radius, aura_strength, -1, FACTION_XENO, hivenumber) playsound(src, SFX_ALIEN_DROOL, 25) update_icon() @@ -80,6 +80,7 @@ /obj/structure/xeno/pherotower/Destroy() GLOB.hive_datums[hivenumber].pherotowers -= src + QDEL_NULL(current_aura) return ..() // Clicking on the tower brings up a radial menu that allows you to select the type of pheromone that this tower will emit.
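Usage sketch for the reworked postpone()/update_nextfire() flow in master.dm and subsystem.dm — illustrative only, not part of the patch. SUBSYSTEM_DEF, SS_KEEP_TIMING and the SSexample_postpone global the macro generates come from the existing subsystem framework; the example_postpone subsystem and demo_postpone_usage() are hypothetical names used only for this sketch.

SUBSYSTEM_DEF(example_postpone)
	name = "Example Postpone"
	wait = 20 // 2 seconds
	flags = SS_KEEP_TIMING

/datum/controller/subsystem/example_postpone/fire(resumed)
	// expensive periodic work would go here
	return

/proc/demo_postpone_usage()
	// During a CPU spike, bank two skipped fires instead of editing next_fire.
	// With this patch postpone() is additive and deferred:
	//     if(can_fire && cycles >= 1)
	//         postponed_fires += cycles
	SSexample_postpone.postpone(2)
	// CheckQueues() later decrements postponed_fires once per skipped cycle and
	// calls update_nextfire(), so every timing rule (SS_TICKER, SS_POST_FIRE_TIMING,
	// SS_KEEP_TIMING) lives in one proc instead of being duplicated in the MC loop.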
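A minimal sketch of the source-tracked init flow that InitializeAtoms() now follows, assuming a hypothetical demo_tracked_atom_init() caller; set_tracked_initalized(), clear_tracked_initalize(), CreateAtoms() and SSicon_smooth.free_deferred() are the procs added or changed in this diff.

/proc/demo_tracked_atom_init(list/atoms_to_init)
	// Each init run gets its own source token, mirroring InitializeAtoms():
	// nested or interleaved mapload runs can no longer clobber each other's state.
	var/static/uid = 0
	uid++
	var/source = "demo init [uid]"
	SSatoms.set_tracked_initalized(INITIALIZATION_INNEW_MAPLOAD, source)
	SSatoms.CreateAtoms(atoms_to_init, null, source)
	SSatoms.clear_tracked_initalize(source) // pops only our entry off initialized_state
	SSicon_smooth.free_deferred(source) // release smooths deferred while we were the active source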
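Sketch of the refcount()-based collection test the garbage subsystem now relies on, using the queue-entry layout from Queue(); demo_gc_entry_collected() is a hypothetical helper, while REFS_WE_EXPECT and GC_QUEUE_ITEM_REF are the defines used in the diff.

#define REFS_WE_EXPECT 2 // the queue's hard reference + the local var below

/proc/demo_gc_entry_collected(list/queue_entry)
	// Queue() now stores the datum itself: list(queue_time, D, D.gc_destroyed)
	var/datum/candidate = queue_entry[GC_QUEUE_ITEM_REF]
	// If only our two expected references remain, nothing else in the world
	// holds the datum and cutting the queue entry lets BYOND collect it.
	if(refcount(candidate) == REFS_WE_EXPECT)
		return TRUE
	// Otherwise something still references it; the real subsystem escalates the
	// entry and can pass refcount(candidate) - REFS_WE_EXPECT to find_references()
	// so the search stops as soon as that many refs have been located.
	return FALSE

#undef REFS_WE_EXPECT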
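The Destroy() additions across bracers.dm, attachments.dm, chairs.dm and gun_system.dm all follow the same hygiene pattern so the refcount() check above can actually reach its target; a hedged sketch with a hypothetical /obj/item/demo_holder type and its owned_part/owner_ref vars:

/obj/item/demo_holder
	var/obj/item/owned_part // created by us, so we delete it
	var/mob/living/owner_ref // merely pointed at, so we only clear it

/obj/item/demo_holder/Destroy()
	QDEL_NULL(owned_part) // delete and clear in one step
	owner_ref = null // clearing is enough; something else owns the mob
	return ..()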