diff --git a/goblint.opam b/goblint.opam index 661222805b..d019379dd4 100644 --- a/goblint.opam +++ b/goblint.opam @@ -74,12 +74,11 @@ dev-repo: "git+https://github.com/goblint/analyzer.git" # on `dune build` goblint.opam will be generated from goblint.opam.template and dune-project # also remember to generate/adjust goblint.opam.locked! available: os-distribution != "alpine" & arch != "arm64" -# pin-depends: [ - # published goblint-cil 2.0.2 is currently up-to-date, so no pin needed - # [ "goblint-cil.2.0.2" "git+https://github.com/goblint/cil.git#98598d94f796a63751e5a9d39c6b3a9fe1f32330" ] +pin-depends: [ + [ "goblint-cil.2.0.2" "git+https://github.com/goblint/cil.git#398dca3d94a06a9026b3737aabf100ee3498229f" ] # TODO: add back after release, only pinned for optimization (https://github.com/ocaml-ppx/ppx_deriving/pull/252) - # [ "ppx_deriving.5.2.1" "git+https://github.com/ocaml-ppx/ppx_deriving.git#0a89b619f94cbbfc3b0fb3255ab4fe5bc77d32d6" ] -# ] + [ "ppx_deriving.5.2.1" "git+https://github.com/ocaml-ppx/ppx_deriving.git#0a89b619f94cbbfc3b0fb3255ab4fe5bc77d32d6" ] +] post-messages: [ "Do not benchmark Goblint on OCaml 5 (https://goblint.readthedocs.io/en/latest/user-guide/benchmarking/)." {ocaml:version >= "5.0.0"} ] diff --git a/goblint.opam.locked b/goblint.opam.locked index bb59c41dd1..ebe024dadd 100644 --- a/goblint.opam.locked +++ b/goblint.opam.locked @@ -128,3 +128,14 @@ conflicts: [ post-messages: [ "Do not benchmark Goblint on OCaml 5 (https://goblint.readthedocs.io/en/latest/user-guide/benchmarking/)." {ocaml:version >= "5.0.0"} ] +# TODO: manually reordered to avoid opam pin crash: https://github.com/ocaml/opam/issues/4936 +pin-depends: [ + [ + "goblint-cil.2.0.2" + "git+https://github.com/goblint/cil.git#398dca3d94a06a9026b3737aabf100ee3498229f" + ] + [ + "ppx_deriving.5.2.1" + "git+https://github.com/ocaml-ppx/ppx_deriving.git#0a89b619f94cbbfc3b0fb3255ab4fe5bc77d32d6" + ] +] diff --git a/goblint.opam.template b/goblint.opam.template index 6259c4d498..a493861e96 100644 --- a/goblint.opam.template +++ b/goblint.opam.template @@ -1,12 +1,11 @@ # on `dune build` goblint.opam will be generated from goblint.opam.template and dune-project # also remember to generate/adjust goblint.opam.locked! available: os-distribution != "alpine" & arch != "arm64" -# pin-depends: [ - # published goblint-cil 2.0.2 is currently up-to-date, so no pin needed - # [ "goblint-cil.2.0.2" "git+https://github.com/goblint/cil.git#98598d94f796a63751e5a9d39c6b3a9fe1f32330" ] +pin-depends: [ + [ "goblint-cil.2.0.2" "git+https://github.com/goblint/cil.git#398dca3d94a06a9026b3737aabf100ee3498229f" ] # TODO: add back after release, only pinned for optimization (https://github.com/ocaml-ppx/ppx_deriving/pull/252) - # [ "ppx_deriving.5.2.1" "git+https://github.com/ocaml-ppx/ppx_deriving.git#0a89b619f94cbbfc3b0fb3255ab4fe5bc77d32d6" ] -# ] + [ "ppx_deriving.5.2.1" "git+https://github.com/ocaml-ppx/ppx_deriving.git#0a89b619f94cbbfc3b0fb3255ab4fe5bc77d32d6" ] +] post-messages: [ "Do not benchmark Goblint on OCaml 5 (https://goblint.readthedocs.io/en/latest/user-guide/benchmarking/)." 
{ocaml:version >= "5.0.0"} ] diff --git a/src/analyses/base.ml b/src/analyses/base.ml index c4fdd633d3..ea595ad96d 100644 --- a/src/analyses/base.ml +++ b/src/analyses/base.ml @@ -150,8 +150,8 @@ struct let longjmp_return = ref dummyFunDec.svar - let heap_var ctx = - let info = match (ctx.ask Q.HeapVar) with + let heap_var on_stack ctx = + let info = match (ctx.ask (Q.AllocVar {on_stack})) with | `Lifted vinfo -> vinfo | _ -> failwith("Ran without a malloc analysis.") in info @@ -1122,6 +1122,10 @@ struct (* interpreter end *) + let is_not_heap_alloc_var ctx v = + let is_alloc = ctx.ask (Queries.IsAllocVar v) in + not is_alloc || (is_alloc && not (ctx.ask (Queries.IsHeapVar v))) + let query_invariant ctx context = let cpa = ctx.local.BaseDomain.cpa in let ask = Analyses.ask_of_ctx ctx in @@ -1249,7 +1253,7 @@ struct (* If there's a non-heap var or an offset in the lval set, we answer with bottom *) (* If we're asking for the BlobSize from the base address, then don't check for offsets => we want to avoid getting bot *) if AD.exists (function - | Addr (v,o) -> (not @@ ctx.ask (Queries.IsHeapVar v)) || (if not from_base_addr then o <> `NoOffset else false) + | Addr (v,o) -> is_not_heap_alloc_var ctx v || (if not from_base_addr then o <> `NoOffset else false) | _ -> false) a then Queries.Result.bot q else ( @@ -1397,7 +1401,7 @@ struct let t = match t_override with | Some t -> t | None -> - if a.f (Q.IsHeapVar x) then + if a.f (Q.IsAllocVar x) then (* the vtype of heap vars will be TVoid, so we need to trust the pointer we got to this to be of the right type *) (* i.e. use the static type of the pointer here *) lval_type @@ -1443,7 +1447,7 @@ struct (* Optimization to avoid evaluating integer values when setting them. The case when invariant = true requires the old_value to be sound for the meet. Allocated blocks are representend by Blobs with additional information, so they need to be looked-up. *) - let old_value = if not invariant && Cil.isIntegralType x.vtype && not (a.f (IsHeapVar x)) && offs = `NoOffset then begin + let old_value = if not invariant && Cil.isIntegralType x.vtype && not (a.f (IsAllocVar x)) && offs = `NoOffset then begin VD.bot_value ~varAttr:x.vattr lval_type end else Priv.read_global a priv_getg st x @@ -2009,7 +2013,7 @@ struct let check_invalid_mem_dealloc ctx special_fn ptr = let has_non_heap_var = AD.exists (function - | Addr (v,_) -> not (ctx.ask (Q.IsHeapVar v)) + | Addr (v,_) -> is_not_heap_alloc_var ctx v | _ -> false) in let has_non_zero_offset = AD.exists (function @@ -2026,6 +2030,7 @@ struct M.warn ~category:(Behavior (Undefined InvalidMemoryDeallocation)) ~tags:[CWE 761] "Free of memory not at start of buffer in function %s for pointer %a" special_fn.vname d_exp ptr | _ -> M.warn ~category:MessageCategory.Analyzer "Pointer %a in function %s doesn't evaluate to a valid address." 
d_exp ptr special_fn.vname + let special ctx (lv:lval option) (f: varinfo) (args: exp list) = let invalidate_ret_lv st = match lv with | Some lv -> @@ -2110,7 +2115,7 @@ struct let dest_a, dest_typ = addr_type_of_exp dest in let value = VD.zero_init_value dest_typ in set ~ctx (Analyses.ask_of_ctx ctx) gs st dest_a dest_typ value - | Memcpy { dest = dst; src }, _ -> + | Memcpy { dest = dst; src; n; }, _ -> (* TODO: use n *) memory_copying dst src (* strcpy(dest, src); *) | Strcpy { dest = dst; src; n = None }, _ -> @@ -2277,13 +2282,22 @@ struct | Unknown, "__goblint_assume_join" -> let id = List.hd args in Priv.thread_join ~force:true (Analyses.ask_of_ctx ctx) (priv_getg ctx.global) id st + | Alloca size, _ -> begin + match lv with + | Some lv -> + let heap_var = AD.of_var (heap_var true ctx) in + (* ignore @@ printf "alloca will allocate %a bytes\n" ID.pretty (eval_int ctx.ask gs st size); *) + set_many ~ctx (Analyses.ask_of_ctx ctx) gs st [(heap_var, TVoid [], Blob (VD.bot (), eval_int (Analyses.ask_of_ctx ctx) gs st size, true)); + (eval_lv (Analyses.ask_of_ctx ctx) gs st lv, (Cilfacade.typeOfLval lv), Address heap_var)] + | _ -> st + end | Malloc size, _ -> begin match lv with | Some lv -> let heap_var = if (get_bool "sem.malloc.fail") - then AD.join (AD.of_var (heap_var ctx)) AD.null_ptr - else AD.of_var (heap_var ctx) + then AD.join (AD.of_var (heap_var false ctx)) AD.null_ptr + else AD.of_var (heap_var false ctx) in (* ignore @@ printf "malloc will allocate %a bytes\n" ID.pretty (eval_int ctx.ask gs st size); *) set_many ~ctx (Analyses.ask_of_ctx ctx) gs st [(heap_var, TVoid [], Blob (VD.bot (), eval_int (Analyses.ask_of_ctx ctx) gs st size, true)); @@ -2293,7 +2307,7 @@ struct | Calloc { count = n; size }, _ -> begin match lv with | Some lv -> (* array length is set to one, as num*size is done when turning into `Calloc *) - let heap_var = heap_var ctx in + let heap_var = heap_var false ctx in let add_null addr = if get_bool "sem.malloc.fail" then AD.join addr AD.null_ptr (* calloc can fail and return NULL *) @@ -2336,7 +2350,7 @@ struct let p_addr_get = get ask gs st p_addr' None in (* implicitly includes join of malloc value (VD.bot) *) let size_int = eval_int ask gs st size in let heap_val:value = Blob (p_addr_get, size_int, true) in (* copy old contents with new size *) - let heap_addr = AD.of_var (heap_var ctx) in + let heap_addr = AD.of_var (heap_var false ctx) in let heap_addr' = if get_bool "sem.malloc.fail" then AD.join heap_addr AD.null_ptr @@ -2584,6 +2598,7 @@ struct | MayBeThreadReturn | PartAccess _ | IsHeapVar _ + | IsAllocVar _ | IsMultiple _ | CreatedThreads | MustJoinedThreads -> diff --git a/src/analyses/libraryDesc.ml b/src/analyses/libraryDesc.ml index 7862b2d6bd..72a4261cb5 100644 --- a/src/analyses/libraryDesc.ml +++ b/src/analyses/libraryDesc.ml @@ -43,6 +43,7 @@ type math = (** Type of special function, or {!Unknown}. *) (* Use inline record if not single {!Cil.exp} argument. 
*) type special = + | Alloca of Cil.exp | Malloc of Cil.exp | Calloc of { count: Cil.exp; size: Cil.exp; } | Realloc of { ptr: Cil.exp; size: Cil.exp; } @@ -67,7 +68,7 @@ type special = | Math of { fun_args: math; } | Memset of { dest: Cil.exp; ch: Cil.exp; count: Cil.exp; } | Bzero of { dest: Cil.exp; count: Cil.exp; } - | Memcpy of { dest: Cil.exp; src: Cil.exp } + | Memcpy of { dest: Cil.exp; src: Cil.exp; n: Cil.exp; } | Strcpy of { dest: Cil.exp; src: Cil.exp; n: Cil.exp option; } | Strcat of { dest: Cil.exp; src: Cil.exp; n: Cil.exp option; } | Strlen of Cil.exp diff --git a/src/analyses/libraryFunctions.ml b/src/analyses/libraryFunctions.ml index 59f63e04c0..1c509e7660 100644 --- a/src/analyses/libraryFunctions.ml +++ b/src/analyses/libraryFunctions.ml @@ -12,13 +12,13 @@ let c_descs_list: (string * LibraryDesc.t) list = LibraryDsl.[ ("memset", special [__ "dest" [w]; __ "ch" []; __ "count" []] @@ fun dest ch count -> Memset { dest; ch; count; }); ("__builtin_memset", special [__ "dest" [w]; __ "ch" []; __ "count" []] @@ fun dest ch count -> Memset { dest; ch; count; }); ("__builtin___memset_chk", special [__ "dest" [w]; __ "ch" []; __ "count" []; drop "os" []] @@ fun dest ch count -> Memset { dest; ch; count; }); - ("memcpy", special [__ "dest" [w]; __ "src" [r]; drop "n" []] @@ fun dest src -> Memcpy { dest; src }); (* TODO: use n *) - ("__builtin_memcpy", special [__ "dest" [w]; __ "src" [r]; drop "n" []] @@ fun dest src -> Memcpy { dest; src }); - ("__builtin___memcpy_chk", special [__ "dest" [w]; __ "src" [r]; drop "n" []; drop "os" []] @@ fun dest src -> Memcpy { dest; src }); - ("memccpy", special [__ "dest" [w]; __ "src" [r]; drop "c" []; drop "n" []] @@ fun dest src -> Memcpy {dest; src}); (* C23 *) (* TODO: use n and c *) - ("memmove", special [__ "dest" [w]; __ "src" [r]; drop "count" []] @@ fun dest src -> Memcpy { dest; src }); - ("__builtin_memmove", special [__ "dest" [w]; __ "src" [r]; drop "count" []] @@ fun dest src -> Memcpy { dest; src }); - ("__builtin___memmove_chk", special [__ "dest" [w]; __ "src" [r]; drop "count" []; drop "os" []] @@ fun dest src -> Memcpy { dest; src }); + ("memcpy", special [__ "dest" [w]; __ "src" [r]; __ "n" []] @@ fun dest src n -> Memcpy { dest; src; n; }); + ("__builtin_memcpy", special [__ "dest" [w]; __ "src" [r]; __ "n" []] @@ fun dest src n -> Memcpy { dest; src; n; }); + ("__builtin___memcpy_chk", special [__ "dest" [w]; __ "src" [r]; __ "n" []; drop "os" []] @@ fun dest src n -> Memcpy { dest; src; n; }); + ("memccpy", special [__ "dest" [w]; __ "src" [r]; drop "c" []; __ "n" []] @@ fun dest src n -> Memcpy {dest; src; n; }); (* C23 *) (* TODO: use c *) + ("memmove", special [__ "dest" [w]; __ "src" [r]; __ "count" []] @@ fun dest src count -> Memcpy { dest; src; n = count; }); + ("__builtin_memmove", special [__ "dest" [w]; __ "src" [r]; __ "count" []] @@ fun dest src count -> Memcpy { dest; src; n = count; }); + ("__builtin___memmove_chk", special [__ "dest" [w]; __ "src" [r]; __ "count" []; drop "os" []] @@ fun dest src count -> Memcpy { dest; src; n = count; }); ("strcpy", special [__ "dest" [w]; __ "src" [r]] @@ fun dest src -> Strcpy { dest; src; n = None; }); ("__builtin_strcpy", special [__ "dest" [w]; __ "src" [r]] @@ fun dest src -> Strcpy { dest; src; n = None; }); ("__builtin___strcpy_chk", special [__ "dest" [w]; __ "src" [r]; drop "os" []] @@ fun dest src -> Strcpy { dest; src; n = None; }); @@ -138,6 +138,12 @@ let c_descs_list: (string * LibraryDesc.t) list = LibraryDsl.[ ("atoll", unknown [drop "nptr" [r]]); 
("setlocale", unknown [drop "category" []; drop "locale" [r]]); ("clock", unknown []); + ("atomic_flag_clear", unknown [drop "obj" [w]]); + ("atomic_flag_clear_explicit", unknown [drop "obj" [w]; drop "order" []]); + ("atomic_flag_test_and_set", unknown [drop "obj" [r; w]]); + ("atomic_flag_test_and_set_explicit", unknown [drop "obj" [r; w]; drop "order" []]); + ("atomic_load", unknown [drop "obj" [r]]); + ("atomic_store", unknown [drop "obj" [w]; drop "desired" []]); ] (** C POSIX library functions. @@ -491,11 +497,25 @@ let gcc_descs_list: (string * LibraryDesc.t) list = LibraryDsl.[ ("__builtin_popcountl", unknown [drop "x" []]); ("__builtin_popcountll", unknown [drop "x" []]); ("__atomic_store_n", unknown [drop "ptr" [w]; drop "val" []; drop "memorder" []]); + ("__atomic_store", unknown [drop "ptr" [w]; drop "val" [r]; drop "memorder" []]); ("__atomic_load_n", unknown [drop "ptr" [r]; drop "memorder" []]); + ("__atomic_load", unknown [drop "ptr" [r]; drop "ret" [w]; drop "memorder" []]); + ("__atomic_clear", unknown [drop "ptr" [w]; drop "memorder" []]); + ("__atomic_compare_exchange_n", unknown [drop "ptr" [r; w]; drop "expected" [r; w]; drop "desired" []; drop "weak" []; drop "success_memorder" []; drop "failure_memorder" []]); + ("__atomic_compare_exchange", unknown [drop "ptr" [r; w]; drop "expected" [r; w]; drop "desired" [r]; drop "weak" []; drop "success_memorder" []; drop "failure_memorder" []]); + ("__atomic_fetch_add", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_fetch_sub", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_fetch_and", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_fetch_xor", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_fetch_or", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_fetch_nand", unknown [drop "ptr" [r; w]; drop "val" []; drop "memorder" []]); + ("__atomic_test_and_set", unknown [drop "ptr" [r; w]; drop "memorder" []]); + ("__atomic_thread_fence", unknown [drop "memorder" []]); ("__sync_fetch_and_add", unknown (drop "ptr" [r; w] :: drop "value" [] :: VarArgs (drop' []))); ("__sync_fetch_and_sub", unknown (drop "ptr" [r; w] :: drop "value" [] :: VarArgs (drop' []))); ("__builtin_va_copy", unknown [drop "dest" [w]; drop "src" [r]]); - ("__builtin_alloca", special [__ "size" []] @@ fun size -> Malloc size); + ("alloca", special [__ "size" []] @@ fun size -> Alloca size); + ("__builtin_alloca", special [__ "size" []] @@ fun size -> Alloca size); ] let glibc_desc_list: (string * LibraryDesc.t) list = LibraryDsl.[ @@ -536,8 +556,8 @@ let glibc_desc_list: (string * LibraryDesc.t) list = LibraryDsl.[ ("strcasestr", unknown [drop "haystack" [r]; drop "needle" [r]]); ("inet_aton", unknown [drop "cp" [r]; drop "inp" [w]]); ("fopencookie", unknown [drop "cookie" []; drop "mode" [r]; drop "io_funcs" [s_deep]]); (* doesn't access cookie but passes it to io_funcs *) - ("mempcpy", special [__ "dest" [w]; __ "src" [r]; drop "n" []] @@ fun dest src -> Memcpy { dest; src }); - ("__builtin___mempcpy_chk", special [__ "dest" [w]; __ "src" [r]; drop "n" []; drop "os" []] @@ fun dest src -> Memcpy { dest; src }); + ("mempcpy", special [__ "dest" [w]; __ "src" [r]; __ "n" []] @@ fun dest src n -> Memcpy { dest; src; n; }); + ("__builtin___mempcpy_chk", special [__ "dest" [w]; __ "src" [r]; __ "n" []; drop "os" []] @@ fun dest src n -> Memcpy { dest; src; n; }); ("rawmemchr", unknown [drop "s" [r]; drop "c" []]); 
("memrchr", unknown [drop "s" [r]; drop "c" []; drop "n" []]); ("memmem", unknown [drop "haystack" [r]; drop "haystacklen" []; drop "needle" [r]; drop "needlelen" [r]]); diff --git a/src/analyses/mCP.ml b/src/analyses/mCP.ml index 1b6a7e5a1d..9eb21b77ef 100644 --- a/src/analyses/mCP.ml +++ b/src/analyses/mCP.ml @@ -144,7 +144,9 @@ struct let spawn_one v d = List.iter (fun (lval, args) -> ctx.spawn lval v args) d in - if not (get_bool "exp.single-threaded") then + if get_bool "exp.single-threaded" then + M.msg_final Error ~category:Unsound "Thread not spawned" + else iter (uncurry spawn_one) @@ group_assoc_eq Basetype.Variables.equal xs let do_sideg ctx (xs:(V.t * (WideningTokens.TS.t * G.t)) list) = diff --git a/src/analyses/mallocFresh.ml b/src/analyses/mallocFresh.ml index 2c2b99a075..3a501fc72f 100644 --- a/src/analyses/mallocFresh.ml +++ b/src/analyses/mallocFresh.ml @@ -43,7 +43,7 @@ struct | Malloc _ | Calloc _ | Realloc _ -> - begin match ctx.ask HeapVar with + begin match ctx.ask (AllocVar {on_stack = false}) with | `Lifted var -> D.add var ctx.local | _ -> ctx.local end diff --git a/src/analyses/memLeak.ml b/src/analyses/memLeak.ml index 1bff7611a4..8576096dfe 100644 --- a/src/analyses/memLeak.ml +++ b/src/analyses/memLeak.ml @@ -44,7 +44,7 @@ struct | Realloc _ -> (* Warn about multi-threaded programs as soon as we encounter a dynamic memory allocation function *) warn_for_multi_threaded ctx; - begin match ctx.ask Queries.HeapVar with + begin match ctx.ask (Queries.AllocVar {on_stack = false}) with | `Lifted var -> D.add var state | _ -> state end @@ -53,7 +53,7 @@ struct | ad when not (Queries.AD.is_top ad) && Queries.AD.cardinal ad = 1 -> (* Note: Need to always set "ana.malloc.unique_address_count" to a value > 0 *) begin match Queries.AD.choose ad with - | Queries.AD.Addr.Addr (v,_) when ctx.ask (Queries.IsHeapVar v) && not @@ ctx.ask (Queries.IsMultiple v) -> D.remove v state (* Unique pointed to heap vars *) + | Queries.AD.Addr.Addr (v,_) when ctx.ask (Queries.IsAllocVar v) && ctx.ask (Queries.IsHeapVar v) && not @@ ctx.ask (Queries.IsMultiple v) -> D.remove v state (* Unique pointed to heap vars *) | _ -> state end | _ -> state diff --git a/src/analyses/memOutOfBounds.ml b/src/analyses/memOutOfBounds.ml index 94c16e9c94..7015e6f143 100644 --- a/src/analyses/memOutOfBounds.ml +++ b/src/analyses/memOutOfBounds.ml @@ -6,6 +6,7 @@ open MessageCategory module AS = AnalysisState module VDQ = ValueDomainQueries +module ID = IntDomain.IntDomTuple (* Note: @@ -27,34 +28,11 @@ struct (* HELPER FUNCTIONS *) let intdom_of_int x = - IntDomain.IntDomTuple.of_int (Cilfacade.ptrdiff_ikind ()) (Z.of_int x) + ID.of_int (Cilfacade.ptrdiff_ikind ()) (Z.of_int x) - let to_index ?typ offs = - let idx_of_int x = - IntDomain.IntDomTuple.of_int (Cilfacade.ptrdiff_ikind ()) (Z.of_int (x / 8)) - in - let rec offset_to_index_offset ?typ offs = match offs with - | `NoOffset -> idx_of_int 0 - | `Field (field, o) -> - let field_as_offset = Field (field, NoOffset) in - let bits_offset, _size = GoblintCil.bitsOffset (TComp (field.fcomp, [])) field_as_offset in - let bits_offset = idx_of_int bits_offset in - let remaining_offset = offset_to_index_offset ~typ:field.ftype o in - IntDomain.IntDomTuple.add bits_offset remaining_offset - | `Index (x, o) -> - let (item_typ, item_size_in_bits) = - match Option.map unrollType typ with - | Some TArray(item_typ, _, _) -> - let item_size_in_bits = bitsSizeOf item_typ in - (Some item_typ, idx_of_int item_size_in_bits) - | _ -> - (None, IntDomain.IntDomTuple.top_of @@ 
Cilfacade.ptrdiff_ikind ()) - in - let bits_offset = IntDomain.IntDomTuple.mul item_size_in_bits x in - let remaining_offset = offset_to_index_offset ?typ:item_typ o in - IntDomain.IntDomTuple.add bits_offset remaining_offset - in - offset_to_index_offset ?typ offs + let size_of_type_in_bytes typ = + let typ_size_in_bytes = (bitsSizeOf typ) / 8 in + intdom_of_int typ_size_in_bytes let rec exp_contains_a_ptr (exp:exp) = match exp with @@ -112,50 +90,51 @@ struct | Addr (v, _) -> begin match v.vtype with | TArray (item_typ, _, _) -> - let item_typ_size_in_bytes = (bitsSizeOf item_typ) / 8 in - let item_typ_size_in_bytes = intdom_of_int item_typ_size_in_bytes in + let item_typ_size_in_bytes = size_of_type_in_bytes item_typ in begin match ctx.ask (Queries.EvalLength ptr) with - | `Lifted arr_len -> `Lifted (IntDomain.IntDomTuple.mul item_typ_size_in_bytes arr_len) - | `Bot -> VDQ.ID.bot () - | `Top -> VDQ.ID.top () + | `Lifted arr_len -> + let arr_len_casted = ID.cast_to (Cilfacade.ptrdiff_ikind ()) arr_len in + begin + try `Lifted (ID.mul item_typ_size_in_bytes arr_len_casted) + with IntDomain.ArithmeticOnIntegerBot _ -> `Bot + end + | `Bot -> `Bot + | `Top -> `Top end | _ -> - let type_size_in_bytes = (bitsSizeOf v.vtype) / 8 in - `Lifted (intdom_of_int type_size_in_bytes) + let type_size_in_bytes = size_of_type_in_bytes v.vtype in + `Lifted type_size_in_bytes end - | _ -> VDQ.ID.top () + | _ -> `Top end in (* Map each points-to-set element to its size *) let pts_sizes = List.map pts_elems_to_sizes pts_list in (* Take the smallest of all sizes that ptr's contents may have *) begin match pts_sizes with - | [] -> VDQ.ID.bot () + | [] -> `Bot | [x] -> x - | x::xs -> List.fold_left (fun acc elem -> - if VDQ.ID.compare acc elem >= 0 then elem else acc - ) x xs + | x::xs -> List.fold_left VDQ.ID.join x xs end | _ -> M.warn "Pointer %a has a points-to-set of top. 
An invalid memory access might occur" d_exp ptr; - VDQ.ID.top () + `Top let get_ptr_deref_type ptr_typ = match ptr_typ with | TPtr (t, _) -> Some t | _ -> None - let size_of_type_in_bytes typ = - let typ_size_in_bytes = (bitsSizeOf typ) / 8 in - intdom_of_int typ_size_in_bytes - let eval_ptr_offset_in_binop ctx exp ptr_contents_typ = let eval_offset = ctx.ask (Queries.EvalInt exp) in - let eval_offset = Option.get @@ VDQ.ID.to_int eval_offset in - let eval_offset = VDQ.ID.of_int (Cilfacade.ptrdiff_ikind ()) eval_offset in let ptr_contents_typ_size_in_bytes = size_of_type_in_bytes ptr_contents_typ in match eval_offset with - | `Lifted i -> `Lifted (IntDomain.IntDomTuple.mul i ptr_contents_typ_size_in_bytes) + | `Lifted eo -> + let casted_eo = ID.cast_to (Cilfacade.ptrdiff_ikind ()) eo in + begin + try `Lifted (ID.mul casted_eo ptr_contents_typ_size_in_bytes) + with IntDomain.ArithmeticOnIntegerBot _ -> `Bot + end | `Top -> `Top | `Bot -> `Bot @@ -167,12 +146,18 @@ struct let bits_offset, _size = GoblintCil.bitsOffset (TComp (field.fcomp, [])) field_as_offset in let bytes_offset = intdom_of_int (bits_offset / 8) in let remaining_offset = offs_to_idx field.ftype o in - IntDomain.IntDomTuple.add bytes_offset remaining_offset + begin + try ID.add bytes_offset remaining_offset + with IntDomain.ArithmeticOnIntegerBot _ -> ID.bot_of @@ Cilfacade.ptrdiff_ikind () + end | `Index (x, o) -> - let typ_size_in_bytes = size_of_type_in_bytes typ in - let bytes_offset = IntDomain.IntDomTuple.mul typ_size_in_bytes x in - let remaining_offset = offs_to_idx typ o in - IntDomain.IntDomTuple.add bytes_offset remaining_offset + begin try + let typ_size_in_bytes = size_of_type_in_bytes typ in + let bytes_offset = ID.mul typ_size_in_bytes x in + let remaining_offset = offs_to_idx typ o in + ID.add bytes_offset remaining_offset + with IntDomain.ArithmeticOnIntegerBot _ -> ID.bot_of @@ Cilfacade.ptrdiff_ikind () + end let rec get_addr_offs ctx ptr = match ctx.ask (Queries.MayPointTo ptr) with @@ -183,17 +168,17 @@ struct begin match VDQ.AD.is_empty a with | true -> M.warn "Pointer %a has an empty points-to-set" d_exp ptr; - IntDomain.IntDomTuple.top_of @@ Cilfacade.ptrdiff_ikind () + ID.top_of @@ Cilfacade.ptrdiff_ikind () | false -> if VDQ.AD.exists (function - | Addr (_, o) -> IntDomain.IntDomTuple.is_bot @@ offs_to_idx t o + | Addr (_, o) -> ID.is_bot @@ offs_to_idx t o | _ -> false ) a then ( (* TODO: Uncomment once staging-memsafety branch changes are applied *) (* set_mem_safety_flag InvalidDeref; *) M.warn "Pointer %a has a bot address offset. An invalid memory access may occur" d_exp ptr ) else if VDQ.AD.exists (function - | Addr (_, o) -> IntDomain.IntDomTuple.is_bot @@ offs_to_idx t o + | Addr (_, o) -> ID.is_bot @@ offs_to_idx t o | _ -> false ) a then ( (* TODO: Uncomment once staging-memsafety branch changes are applied *) @@ -204,16 +189,16 @@ struct (* Hence, we can just pick one element and obtain its offset *) begin match VDQ.AD.choose a with | Addr (_, o) -> offs_to_idx t o - | _ -> IntDomain.IntDomTuple.top_of @@ Cilfacade.ptrdiff_ikind () + | _ -> ID.top_of @@ Cilfacade.ptrdiff_ikind () end end | None -> M.error "Expression %a doesn't have pointer type" d_exp ptr; - IntDomain.IntDomTuple.top_of @@ Cilfacade.ptrdiff_ikind () + ID.top_of @@ Cilfacade.ptrdiff_ikind () end | _ -> M.warn "Pointer %a has a points-to-set of top. 
An invalid memory access might occur" d_exp ptr; - IntDomain.IntDomTuple.top_of @@ Cilfacade.ptrdiff_ikind () + ID.top_of @@ Cilfacade.ptrdiff_ikind () and check_lval_for_oob_access ctx ?(is_implicitly_derefed = false) lval = if not @@ lval_contains_a_ptr lval then () @@ -242,15 +227,25 @@ struct let ptr_contents_type = get_ptr_deref_type ptr_type in match ptr_contents_type with | Some t -> - begin match VDQ.ID.is_top ptr_size with - | true -> + begin match ptr_size, addr_offs with + | `Top, _ -> + AS.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a is top. Memory out-of-bounds access might occur due to pointer arithmetic" d_exp lval_exp + | `Bot, _ -> AS.svcomp_may_invalid_deref := true; - M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a not known. Memory out-of-bounds access might occur due to pointer arithmetic" d_exp lval_exp - | false -> - let offs = `Lifted addr_offs in - if ptr_size < offs then begin - AS.svcomp_may_invalid_deref := true; - M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer is %a (in bytes). It is offset by %a (in bytes) due to pointer arithmetic. Memory out-of-bounds access must occur" VDQ.ID.pretty ptr_size VDQ.ID.pretty offs + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a is bot. Memory out-of-bounds access might occur due to pointer arithmetic" d_exp lval_exp + | `Lifted ps, ao -> + let casted_ps = ID.cast_to (Cilfacade.ptrdiff_ikind ()) ps in + let casted_ao = ID.cast_to (Cilfacade.ptrdiff_ikind ()) ao in + let ptr_size_lt_offs = ID.lt casted_ps casted_ao in + begin match ID.to_bool ptr_size_lt_offs with + | Some true -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer is %a (in bytes). It is offset by %a (in bytes) due to pointer arithmetic. Memory out-of-bounds access must occur" ID.pretty casted_ps ID.pretty casted_ao + | Some false -> () + | None -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Could not compare size of pointer (%a) (in bytes) with offset by (%a) (in bytes). Memory out-of-bounds access might occur" ID.pretty casted_ps ID.pretty casted_ao end end | _ -> M.error "Expression %a is not a pointer" d_exp lval_exp @@ -296,27 +291,81 @@ struct let offset_size = eval_ptr_offset_in_binop ctx e2 t in (* Make sure to add the address offset to the binop offset *) let offset_size_with_addr_size = match offset_size with - | `Lifted os -> `Lifted (IntDomain.IntDomTuple.add os addr_offs) + | `Lifted os -> + let casted_os = ID.cast_to (Cilfacade.ptrdiff_ikind ()) os in + let casted_ao = ID.cast_to (Cilfacade.ptrdiff_ikind ()) addr_offs in + begin + try `Lifted (ID.add casted_os casted_ao) + with IntDomain.ArithmeticOnIntegerBot _ -> `Bot + end | `Top -> `Top | `Bot -> `Bot in - begin match VDQ.ID.is_top ptr_size, VDQ.ID.is_top offset_size_with_addr_size with - | true, _ -> + begin match ptr_size, offset_size_with_addr_size with + | `Top, _ -> + AS.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a in expression %a is top. Memory out-of-bounds access might occur" d_exp e1 d_exp binopexp + | _, `Top -> AS.svcomp_may_invalid_deref := true; - M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a in expression %a not known. 
Memory out-of-bounds access might occur" d_exp e1 d_exp binopexp - | _, true -> + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Operand value for pointer arithmetic in expression %a is top. Memory out-of-bounds access might occur" d_exp binopexp + | `Bot, _ -> AS.svcomp_may_invalid_deref := true; - M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Operand value for pointer arithmetic in expression %a not known. Memory out-of-bounds access might occur" d_exp binopexp - | false, false -> - if ptr_size < offset_size_with_addr_size then begin - AS.svcomp_may_invalid_deref := true; - M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer in expression %a is %a (in bytes). It is offset by %a (in bytes). Memory out-of-bounds access must occur" d_exp binopexp VDQ.ID.pretty ptr_size VDQ.ID.pretty offset_size_with_addr_size + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer %a in expression %a is bottom. Memory out-of-bounds access might occur" d_exp e1 d_exp binopexp + | _, `Bot -> + AS.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Operand value for pointer arithmetic in expression %a is bottom. Memory out-of-bounds access might occur" d_exp binopexp + | `Lifted ps, `Lifted o -> + let casted_ps = ID.cast_to (Cilfacade.ptrdiff_ikind ()) ps in + let casted_o = ID.cast_to (Cilfacade.ptrdiff_ikind ()) o in + let ptr_size_lt_offs = ID.lt casted_ps casted_o in + begin match ID.to_bool ptr_size_lt_offs with + | Some true -> + AS.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of pointer in expression %a is %a (in bytes). It is offset by %a (in bytes). Memory out-of-bounds access must occur" d_exp binopexp ID.pretty casted_ps ID.pretty casted_o + | Some false -> () + | None -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Could not compare pointer size (%a) with offset (%a). Memory out-of-bounds access may occur" ID.pretty casted_ps ID.pretty casted_o end end | _ -> M.error "Binary expression %a doesn't have a pointer" d_exp binopexp end | _ -> () + (* For memset() and memcpy() *) + let check_count ctx fun_name dest n = + let (behavior:MessageCategory.behavior) = Undefined MemoryOutOfBoundsAccess in + let cwe_number = 823 in + let dest_size = get_size_of_ptr_target ctx dest in + let eval_n = ctx.ask (Queries.EvalInt n) in + let addr_offs = get_addr_offs ctx dest in + match dest_size, eval_n with + | `Top, _ -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of dest %a in function %s is unknown. Memory out-of-bounds access might occur" d_exp dest fun_name + | _, `Top -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Count parameter, passed to function %s is unknown. Memory out-of-bounds access might occur" fun_name + | `Bot, _ -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of dest %a in function %s is bottom. 
Memory out-of-bounds access might occur" d_exp dest fun_name + | _, `Bot -> + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Count parameter, passed to function %s is bottom" fun_name + | `Lifted ds, `Lifted en -> + let casted_ds = ID.cast_to (Cilfacade.ptrdiff_ikind ()) ds in + let casted_en = ID.cast_to (Cilfacade.ptrdiff_ikind ()) en in + let casted_ao = ID.cast_to (Cilfacade.ptrdiff_ikind ()) addr_offs in + let dest_size_lt_count = ID.lt casted_ds (ID.add casted_en casted_ao) in + begin match ID.to_bool dest_size_lt_count with + | Some true -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Size of dest in function %s is %a (in bytes) with an address offset of %a (in bytes). Count is %a (in bytes). Memory out-of-bounds access must occur" fun_name ID.pretty casted_ds ID.pretty casted_ao ID.pretty casted_en + | Some false -> () + | None -> + AnalysisState.svcomp_may_invalid_deref := true; + M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Could not compare size of dest (%a) with address offset (%a) count (%a) in function %s. Memory out-of-bounds access may occur" ID.pretty casted_ds ID.pretty casted_ao ID.pretty casted_en fun_name + end + (* TRANSFER FUNCTIONS *) @@ -344,6 +393,11 @@ struct in Option.iter (fun x -> check_lval_for_oob_access ctx x) lval; List.iter (fun arg -> check_exp_for_oob_access ctx ~is_implicitly_derefed:(is_arg_implicitly_derefed arg) arg) arglist; + (* Check calls to memset and memcpy for out-of-bounds-accesses *) + match desc.special arglist with + | Memset { dest; ch; count; } -> check_count ctx f.vname dest count; + | Memcpy { dest; src; n = count; } -> check_count ctx f.vname dest count; + | _ -> (); ctx.local let enter ctx (lval: lval option) (f:fundec) (args:exp list) : (D.t * D.t) list = diff --git a/src/analyses/useAfterFree.ml b/src/analyses/useAfterFree.ml index 1b1e2f008d..02231336c0 100644 --- a/src/analyses/useAfterFree.ml +++ b/src/analyses/useAfterFree.ml @@ -4,7 +4,12 @@ open GoblintCil open Analyses open MessageCategory -module ToppedVarInfoSet = SetDomain.ToppedSet(CilType.Varinfo)(struct let topname = "All Heap Variables" end) +module AllocaVars = SetDomain.ToppedSet(CilType.Varinfo)(struct let topname = "All alloca() Variables" end) +module HeapVars = SetDomain.ToppedSet(CilType.Varinfo)(struct let topname = "All Heap Variables" end) + +(* Heap vars created by alloca() and deallocated at function exit * Heap vars deallocated by free() *) +module StackAndHeapVars = Lattice.Prod(AllocaVars)(HeapVars) + module ThreadIdToJoinedThreadsMap = MapDomain.MapBot(ThreadIdDomain.ThreadLifted)(ConcDomain.MustThreadSet) module Spec : Analyses.MCPSpec = @@ -13,7 +18,7 @@ struct let name () = "useAfterFree" - module D = ToppedVarInfoSet + module D = StackAndHeapVars module C = Lattice.Unit module G = ThreadIdToJoinedThreadsMap module V = VarinfoV @@ -75,7 +80,7 @@ struct set_global_svcomp_var is_double_free; M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "Current thread is not unique and a %s might occur for heap variable %a" bug_name CilType.Varinfo.pretty heap_var end - else if D.mem heap_var ctx.local then begin + else if HeapVars.mem heap_var (snd ctx.local) then begin set_global_svcomp_var is_double_free; M.warn ~category:(Behavior behavior) ~tags:[CWE cwe_number] "%s might occur in current unique thread %a for heap variable %a" bug_name ThreadIdDomain.FlagConfiguredTID.pretty current CilType.Varinfo.pretty heap_var end @@ -110,13 +115,13 @@ struct 
begin match ctx.ask (Queries.MayPointTo lval_to_query) with | ad when not (Queries.AD.is_top ad) -> let warn_for_heap_var v = - if D.mem v state then + if HeapVars.mem v (snd state) then M.warn ~category:(Behavior undefined_behavior) ~tags:[CWE cwe_number] "lval (%s) in \"%s\" points to a maybe freed memory region" v.vname transfer_fn_name in let pointed_to_heap_vars = Queries.AD.fold (fun addr vars -> match addr with - | Queries.AD.Addr.Addr (v,_) when ctx.ask (Queries.IsHeapVar v) -> v :: vars + | Queries.AD.Addr.Addr (v,_) when ctx.ask (Queries.IsAllocVar v) -> v :: vars | _ -> vars ) ad [] in @@ -160,8 +165,7 @@ struct let globals_to_side_effect = G.add threadid joined_threads current_globals in ctx.sideg heap_var globals_to_side_effect in - D.iter side_effect_globals_to_heap_var freed_heap_vars - + HeapVars.iter side_effect_globals_to_heap_var freed_heap_vars (* TRANSFER FUNCTIONS *) @@ -185,7 +189,7 @@ struct let enter ctx (lval:lval option) (f:fundec) (args:exp list) : (D.t * D.t) list = let caller_state = ctx.local in List.iter (fun arg -> warn_exp_might_contain_freed "enter" ctx arg) args; - if D.is_empty caller_state then + if AllocaVars.is_empty (fst caller_state) && HeapVars.is_empty (snd caller_state) then [caller_state, caller_state] else ( let reachable_from_args = List.fold_left (fun ad arg -> Queries.AD.join ad (ctx.ask (ReachableFrom arg))) (Queries.AD.empty ()) args in @@ -193,13 +197,18 @@ struct [caller_state, caller_state] else let reachable_vars = Queries.AD.to_var_may reachable_from_args in - let callee_state = D.filter (fun var -> List.mem var reachable_vars) caller_state in (* TODO: use AD.mem directly *) + let callee_state = (AllocaVars.empty (), HeapVars.filter (fun var -> List.mem var reachable_vars) (snd caller_state)) in (* TODO: use AD.mem directly *) [caller_state, callee_state] ) let combine_env ctx (lval:lval option) fexp (f:fundec) (args:exp list) fc (callee_local:D.t) (f_ask:Queries.ask) : D.t = - let caller_state = ctx.local in - D.join caller_state callee_local + let (caller_stack_state, caller_heap_state) = ctx.local in + let callee_stack_state = fst callee_local in + let callee_heap_state = snd callee_local in + (* Put all alloca()-vars together with all freed() vars in the caller's second component *) + (* Don't change caller's first component => caller hasn't exited yet *) + let callee_combined_state = HeapVars.join callee_stack_state callee_heap_state in + (caller_stack_state, HeapVars.join caller_heap_state callee_combined_state) let combine_assign ctx (lval:lval option) fexp (f:fundec) (args:exp list) fc (callee_local:D.t) (f_ask: Queries.ask): D.t = Option.iter (fun x -> warn_lval_might_contain_freed "enter" ctx x) lval; @@ -224,14 +233,20 @@ struct let pointed_to_heap_vars = Queries.AD.fold (fun addr state -> match addr with - | Queries.AD.Addr.Addr (var,_) when ctx.ask (Queries.IsHeapVar var) -> D.add var state + | Queries.AD.Addr.Addr (var,_) when ctx.ask (Queries.IsAllocVar var) && ctx.ask (Queries.IsHeapVar var) -> HeapVars.add var state | _ -> state - ) ad (D.empty ()) + ) ad (HeapVars.empty ()) in (* Side-effect the tid that's freeing all the heap vars collected here *) side_effect_mem_free ctx pointed_to_heap_vars (get_current_threadid ctx) (get_joined_threads ctx); (* Add all heap vars, which ptr points to, to the state *) - D.join state (pointed_to_heap_vars) + (fst state, HeapVars.join (snd state) pointed_to_heap_vars) + | _ -> state + end + | Alloca _ -> + (* Create fresh heap var for the alloca() call *) + begin match ctx.ask 
(Queries.AllocVar {on_stack = true}) with + | `Lifted v -> (AllocaVars.add v (fst state), snd state) | _ -> state end | _ -> state diff --git a/src/analyses/wrapperFunctionAnalysis.ml b/src/analyses/wrapperFunctionAnalysis.ml index d9bbdb6197..5c0176df48 100644 --- a/src/analyses/wrapperFunctionAnalysis.ml +++ b/src/analyses/wrapperFunctionAnalysis.ml @@ -133,7 +133,7 @@ module MallocWrapper : MCPSpec = struct let query (ctx: (D.t, G.t, C.t, V.t) ctx) (type a) (q: a Q.t): a Q.result = let wrapper_node, counter = ctx.local in match q with - | Q.HeapVar -> + | Q.AllocVar {on_stack = on_stack} -> let node = match wrapper_node with | `Lifted wrapper_node -> wrapper_node | _ -> node_for_ctx ctx @@ -141,8 +141,11 @@ module MallocWrapper : MCPSpec = struct let count = UniqueCallCounter.find (`Lifted node) counter in let var = NodeVarinfoMap.to_varinfo (ctx.ask Q.CurrentThreadId, node, count) in var.vdecl <- UpdateCil.getLoc node; (* TODO: does this do anything bad for incremental? *) + if on_stack then var.vattr <- addAttribute (Attr ("stack_alloca", [])) var.vattr; (* If the call was for stack allocation, add an attr to mark the heap var *) `Lifted var | Q.IsHeapVar v -> + NodeVarinfoMap.mem_varinfo v && not @@ hasAttribute "stack_alloca" v.vattr + | Q.IsAllocVar v -> NodeVarinfoMap.mem_varinfo v | Q.IsMultiple v -> begin match NodeVarinfoMap.from_varinfo v with diff --git a/src/domains/access.ml b/src/domains/access.ml index fa6446df16..8907ccbc32 100644 --- a/src/domains/access.ml +++ b/src/domains/access.ml @@ -10,28 +10,105 @@ module M = Messages (* Some helper functions to avoid flagging race warnings on atomic types, and * other irrelevant stuff, such as mutexes and functions. *) -let is_ignorable_type (t: typ): bool = - match t with - | TNamed ({ tname = "atomic_t" | "pthread_mutex_t" | "pthread_rwlock_t" | "pthread_spinlock_t" | "spinlock_t" | "pthread_cond_t"; _ }, _) -> true - | TComp ({ cname = "__pthread_mutex_s" | "__pthread_rwlock_arch_t" | "__jmp_buf_tag" | "_pthread_cleanup_buffer" | "__pthread_cleanup_frame" | "__cancel_jmp_buf_tag"; _}, _) -> true - | TComp ({ cname; _}, _) when String.starts_with_stdlib ~prefix:"__anon" cname -> +let is_ignorable_comp_name = function + | "__pthread_mutex_s" | "__pthread_rwlock_arch_t" | "__jmp_buf_tag" | "_pthread_cleanup_buffer" | "__pthread_cleanup_frame" | "__cancel_jmp_buf_tag" | "_IO_FILE" -> true + | cname when String.starts_with_stdlib ~prefix:"__anon" cname -> begin match Cilfacade.split_anoncomp_name cname with | (true, Some ("__once_flag" | "__pthread_unwind_buf_t" | "__cancel_jmp_buf"), _) -> true (* anonstruct *) | (false, Some ("pthread_mutexattr_t" | "pthread_condattr_t" | "pthread_barrierattr_t"), _) -> true (* anonunion *) | _ -> false end - | TComp ({ cname = "lock_class_key"; _ }, _) -> true - | TInt (IInt, attr) when hasAttribute "mutex" attr -> true - | t when hasAttribute "atomic" (typeAttrs t) -> true (* C11 _Atomic *) + | "lock_class_key" -> true (* kernel? 
*) | _ -> false -let is_ignorable = function - | None -> false - | Some (v,os) when hasAttribute "thread" v.vattr && not (v.vaddrof) -> true (* Thread-Local Storage *) - | Some (v,os) when BaseUtil.is_volatile v && not (get_bool "ana.race.volatile") -> true (* volatile & races on volatiles should not be reported *) - | Some (v,os) -> - try isFunctionType v.vtype || is_ignorable_type v.vtype - with Not_found -> false +let is_ignorable_attrs attrs = + let is_ignorable_attr = function + | Attr ("volatile", _) when not (get_bool "ana.race.volatile") -> true (* volatile & races on volatiles should not be reported *) + | Attr ("atomic", _) -> true (* C11 _Atomic *) + | _ -> false + in + List.exists is_ignorable_attr attrs + +let rec is_ignorable_type (t: typ): bool = + (* efficient pattern matching first *) + match t with + | TNamed ({ tname = "atomic_t" | "pthread_mutex_t" | "pthread_rwlock_t" | "pthread_spinlock_t" | "spinlock_t" | "pthread_cond_t" | "atomic_flag" | "FILE" | "__FILE"; _ }, _) -> true + | TComp ({ cname; _}, _) when is_ignorable_comp_name cname -> true + | TInt (IInt, attr) when hasAttribute "mutex" attr -> true (* kernel? *) + | TFun _ -> true + | _ -> + if is_ignorable_attrs (typeAttrsOuter t) then (* only outer because we unroll TNamed ourselves *) + true + else ( + (* unroll TNamed once *) + (* can't use unrollType because we want to check TNamed-s at all intermediate typedefs as well *) + match t with + | TNamed ({ttype; _}, attrs) -> is_ignorable_type (typeAddAttributes attrs ttype) + | _ -> false + ) + +let rec is_ignorable_type_offset (t: typ) (o: _ Offset.t): bool = + (* similar to Cilfacade.typeOffset but we want to check types at all intermediate offsets as well *) + if is_ignorable_type t then + true (* type at offset so far ignorable, no need to recurse *) + else ( + match o with + | `NoOffset -> false (* already checked t *) + | `Index (_, o') -> + begin match unrollType t with + | TArray (et, _, attrs) -> + let t' = Cilfacade.typeBlendAttributes attrs et in + is_ignorable_type_offset t' o' + | _ -> false (* index on non-array *) + end + | `Field (f, o') -> + begin match unrollType t with + | TComp (_, attrs) -> + let t' = Cilfacade.typeBlendAttributes attrs f.ftype in + is_ignorable_type_offset t' o' + | _ -> false (* field on non-compound *) + end + ) + +(** {!is_ignorable_type} for {!typsig}. *) +let is_ignorable_typsig (ts: typsig): bool = + (* efficient pattern matching first *) + match ts with + | TSComp (_, cname, _) when is_ignorable_comp_name cname -> true + | TSFun _ -> true + | TSBase t -> is_ignorable_type t + | _ -> is_ignorable_attrs (typeSigAttrs ts) + +(** {!is_ignorable_type_offset} for {!typsig}. 
*) +let rec is_ignorable_typsig_offset (ts: typsig) (o: _ Offset.t): bool = + if is_ignorable_typsig ts then + true (* type at offset so far ignorable, no need to recurse *) + else ( + match o with + | `NoOffset -> false (* already checked t *) + | `Index (_, o') -> + begin match ts with + | TSArray (ets, _, attrs) -> + let ts' = Cilfacade.typeSigBlendAttributes attrs ets in + is_ignorable_typsig_offset ts' o' + | _ -> false (* index on non-array *) + end + | `Field (f, o') -> + begin match ts with + | TSComp (_, _, attrs) -> + let t' = Cilfacade.typeBlendAttributes attrs f.ftype in + is_ignorable_type_offset t' o' (* switch to type because it is more precise with TNamed *) + | _ -> false (* field on non-compound *) + end + ) + +let is_ignorable_mval = function + | ({vaddrof = false; vattr; _}, _) when hasAttribute "thread" vattr -> true (* Thread-Local Storage *) + | (v, o) -> is_ignorable_type_offset v.vtype o (* can't use Cilfacade.typeOffset because we want to check types at all intermediate offsets as well *) + +let is_ignorable_memo = function + | (`Type ts, o) -> is_ignorable_typsig_offset ts o + | (`Var v, o) -> is_ignorable_mval (v, o) module TSH = Hashtbl.Make (CilType.Typsig) @@ -195,8 +272,7 @@ let get_val_type e: acc_typ = (** Add access to {!Memo} after distributing. *) let add_one ~side memo: unit = - let mv = Memo.to_mval memo in - let ignorable = is_ignorable mv in + let ignorable = is_ignorable_memo memo in if M.tracing then M.trace "access" "add_one %a (ignorable = %B)\n" Memo.pretty memo ignorable; if not ignorable then side memo diff --git a/src/domains/queries.ml b/src/domains/queries.ml index 1a7ec2276d..c706339bf2 100644 --- a/src/domains/queries.ml +++ b/src/domains/queries.ml @@ -101,7 +101,10 @@ type _ t = | IterVars: itervar -> Unit.t t | PathQuery: int * 'a t -> 'a t (** Query only one path under witness lifter. *) | DYojson: FlatYojson.t t (** Get local state Yojson of one path under [PathQuery]. *) - | HeapVar: VI.t t + | AllocVar: {on_stack: bool} -> VI.t t + (* Create a variable representing a dynamic allocation-site *) + (* If on_stack is [true], then the dynamic allocation is on the stack (i.e., alloca() or a similar function was called). Otherwise, allocation is on the heap *) + | IsAllocVar: varinfo -> MayBool.t t (* [true] if variable represents dynamically allocated memory *) | IsHeapVar: varinfo -> MayBool.t t (* TODO: is may or must? *) | IsMultiple: varinfo -> MustBool.t t (* For locals: Is another copy of this local variable reachable via pointers? 
*) @@ -154,6 +157,7 @@ struct | MayBePublicWithout _ -> (module MayBool) | MayBeThreadReturn -> (module MayBool) | IsHeapVar _ -> (module MayBool) + | IsAllocVar _ -> (module MayBool) | MustBeProtectedBy _ -> (module MustBool) | MustBeAtomic -> (module MustBool) | MustBeSingleThreaded _ -> (module MustBool) @@ -165,7 +169,7 @@ struct | BlobSize _ -> (module ID) | CurrentThreadId -> (module ThreadIdDomain.ThreadLifted) | ThreadCreateIndexedNode -> (module ThreadNodeLattice) - | HeapVar -> (module VI) + | AllocVar _ -> (module VI) | EvalStr _ -> (module SD) | IterPrevVars _ -> (module Unit) | IterVars _ -> (module Unit) @@ -218,6 +222,7 @@ struct | MayBePublicWithout _ -> MayBool.top () | MayBeThreadReturn -> MayBool.top () | IsHeapVar _ -> MayBool.top () + | IsAllocVar _ -> MayBool.top () | MutexType _ -> MutexAttrDomain.top () | MustBeProtectedBy _ -> MustBool.top () | MustBeAtomic -> MustBool.top () @@ -230,7 +235,7 @@ struct | BlobSize _ -> ID.top () | CurrentThreadId -> ThreadIdDomain.ThreadLifted.top () | ThreadCreateIndexedNode -> ThreadNodeLattice.top () - | HeapVar -> VI.top () + | AllocVar _ -> VI.top () | EvalStr _ -> SD.top () | IterPrevVars _ -> Unit.top () | IterVars _ -> Unit.top () @@ -290,7 +295,7 @@ struct | Any (PartAccess _) -> 23 | Any (IterPrevVars _) -> 24 | Any (IterVars _) -> 25 - | Any HeapVar -> 29 + | Any (AllocVar _) -> 29 | Any (IsHeapVar _) -> 30 | Any (IsMultiple _) -> 31 | Any (EvalThread _) -> 32 @@ -315,6 +320,7 @@ struct | Any ThreadCreateIndexedNode -> 51 | Any ThreadsJoinedCleanly -> 52 | Any (TmpSpecial _) -> 53 + | Any (IsAllocVar _) -> 54 let rec compare a b = let r = Stdlib.compare (order a) (order b) in @@ -354,6 +360,7 @@ struct else compare (Any q1) (Any q2) | Any (IsHeapVar v1), Any (IsHeapVar v2) -> CilType.Varinfo.compare v1 v2 + | Any (IsAllocVar v1), Any (IsAllocVar v2) -> CilType.Varinfo.compare v1 v2 | Any (IsMultiple v1), Any (IsMultiple v2) -> CilType.Varinfo.compare v1 v2 | Any (EvalThread e1), Any (EvalThread e2) -> CilType.Exp.compare e1 e2 | Any (EvalJumpBuf e1), Any (EvalJumpBuf e2) -> CilType.Exp.compare e1 e2 @@ -394,6 +401,7 @@ struct | Any (IterVars i) -> 0 | Any (PathQuery (i, q)) -> 31 * i + hash (Any q) | Any (IsHeapVar v) -> CilType.Varinfo.hash v + | Any (IsAllocVar v) -> CilType.Varinfo.hash v | Any (IsMultiple v) -> CilType.Varinfo.hash v | Any (EvalThread e) -> CilType.Exp.hash e | Any (EvalJumpBuf e) -> CilType.Exp.hash e @@ -441,8 +449,9 @@ struct | Any (IterPrevVars i) -> Pretty.dprintf "IterPrevVars _" | Any (IterVars i) -> Pretty.dprintf "IterVars _" | Any (PathQuery (i, q)) -> Pretty.dprintf "PathQuery (%d, %a)" i pretty (Any q) - | Any HeapVar -> Pretty.dprintf "HeapVar" + | Any (AllocVar {on_stack = on_stack}) -> Pretty.dprintf "AllocVar %b" on_stack | Any (IsHeapVar v) -> Pretty.dprintf "IsHeapVar %a" CilType.Varinfo.pretty v + | Any (IsAllocVar v) -> Pretty.dprintf "IsAllocVar %a" CilType.Varinfo.pretty v | Any (IsMultiple v) -> Pretty.dprintf "IsMultiple %a" CilType.Varinfo.pretty v | Any (EvalThread e) -> Pretty.dprintf "EvalThread %a" CilType.Exp.pretty e | Any (EvalJumpBuf e) -> Pretty.dprintf "EvalJumpBuf %a" CilType.Exp.pretty e diff --git a/src/util/cilfacade.ml b/src/util/cilfacade.ml index eb7330aa19..6d55211c8d 100644 --- a/src/util/cilfacade.ml +++ b/src/util/cilfacade.ml @@ -322,6 +322,15 @@ and typeOffset basetyp = | t -> raise (TypeOfError (Field_NonCompound (fi, t))) +let typeBlendAttributes baseAttrs = (* copied from Cilfacade.typeOffset *) + let (_, _, contageous) = partitionAttributes 
~default:AttrName baseAttrs in + typeAddAttributes contageous + +let typeSigBlendAttributes baseAttrs = + let (_, _, contageous) = partitionAttributes ~default:AttrName baseAttrs in + typeSigAddAttrs contageous + + (** {!Cil.mkCast} using our {!typeOf}. *) let mkCast ~(e: exp) ~(newt: typ) = let oldt = diff --git a/tests/regression/04-mutex/62-simple_atomic_nr.c b/tests/regression/04-mutex/62-simple_atomic_nr.c index d63f303251..fdef44bdd6 100644 --- a/tests/regression/04-mutex/62-simple_atomic_nr.c +++ b/tests/regression/04-mutex/62-simple_atomic_nr.c @@ -1,24 +1,83 @@ #include -#include #include -atomic_int myglobal; -pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; -pthread_mutex_t mutex2 = PTHREAD_MUTEX_INITIALIZER; +atomic_int g1; +_Atomic int g2; +_Atomic(int) g3; + +atomic_int a1[1]; +_Atomic int a2[1]; +_Atomic(int) a3[1]; + +struct s { + int f0; + atomic_int f1; + _Atomic int f2; + _Atomic(int) f3; +}; + +struct s s1; +_Atomic struct s s2; +_Atomic(struct s) s3; + +typedef atomic_int t_int1; +typedef _Atomic int t_int2; +typedef _Atomic(int) t_int3; + +t_int1 t1; +t_int2 t2; +t_int3 t3; + +typedef int t_int0; + +_Atomic t_int0 t0; +_Atomic(t_int0) t00; + +atomic_int *p0 = &g1; +int x; +// int * _Atomic p1 = &x; // TODO: https://github.com/goblint/cil/issues/64 +// _Atomic(int*) p2 = &x; // TODO: https://github.com/goblint/cil/issues/64 +// atomic_int * _Atomic p3 = &g1; // TODO: https://github.com/goblint/cil/issues/64 + +atomic_flag flag = ATOMIC_FLAG_INIT; void *t_fun(void *arg) { - pthread_mutex_lock(&mutex1); - myglobal=myglobal+1; // NORACE - pthread_mutex_unlock(&mutex1); + g1++; // NORACE + g2++; // NORACE + g3++; // NORACE + a1[0]++; // NORACE + a2[0]++; // NORACE + a3[0]++; // NORACE + s1.f1++; // NORACE + s1.f2++; // NORACE + s1.f3++; // NORACE + s2.f0++; // NORACE + s3.f0++; // NORACE + t1++; // NORACE + t2++; // NORACE + t3++; // NORACE + t0++; // NORACE + t00++; // NORACE + (*p0)++; // NORACE + // p1++; // TODO NORACE: https://github.com/goblint/cil/issues/64 + // p2++; // TODO NORACE: https://github.com/goblint/cil/issues/64 + // p3++; // TODO NORACE: https://github.com/goblint/cil/issues/64 + // (*p3)++; // TODO NORACE: https://github.com/goblint/cil/issues/64 + + struct s ss = {0}; + s2 = ss; // NORACE + s3 = ss; // NORACE + + atomic_flag_clear(&flag); // NORACE + atomic_flag_test_and_set(&flag); // NORACE return NULL; } int main(void) { - pthread_t id; + pthread_t id, id2; pthread_create(&id, NULL, t_fun, NULL); - pthread_mutex_lock(&mutex2); - myglobal=myglobal+1; // NORACE - pthread_mutex_unlock(&mutex2); - pthread_join (id, NULL); + pthread_create(&id2, NULL, t_fun, NULL); + pthread_join(id, NULL); + pthread_join(id2, NULL); return 0; } diff --git a/tests/regression/04-mutex/99-volatile.c b/tests/regression/04-mutex/99-volatile.c index aaf81f13a1..7c2a255902 100644 --- a/tests/regression/04-mutex/99-volatile.c +++ b/tests/regression/04-mutex/99-volatile.c @@ -1,18 +1,53 @@ // PARAM: --disable ana.race.volatile #include -#include -volatile int myglobal; +volatile int g1; + +volatile int a1[1]; + +struct s { + int f0; + volatile int f1; +}; + +struct s s1; +volatile struct s s2; + +typedef volatile int t_int1; + +t_int1 t1; + +typedef int t_int0; + +volatile t_int0 t0; + +volatile int *p0 = &g1; +int x; +int * volatile p1 = &x; +volatile int * volatile p2 = &g1; void *t_fun(void *arg) { - myglobal= 8; //NORACE + g1++; // NORACE + a1[0]++; // NORACE + s1.f1++; // NORACE + s2.f0++; // NORACE + t1++; // NORACE + t0++; // NORACE + (*p0)++; // NORACE + p1++; // 
NORACE + p2++; // NORACE + (*p2)++; // NORACE + + struct s ss = {0}; + s2 = ss; // NORACE return NULL; } int main(void) { - pthread_t id; - pthread_create(&id, NULL, t_fun, (void*) &myglobal); - myglobal = 42; //NORACE - pthread_join (id, NULL); + pthread_t id, id2; + pthread_create(&id, NULL, t_fun, NULL); + pthread_create(&id2, NULL, t_fun, NULL); + pthread_join(id, NULL); + pthread_join(id2, NULL); return 0; -} \ No newline at end of file +} diff --git a/tests/regression/05-lval_ls/23-race-null-type-deep.c b/tests/regression/05-lval_ls/23-race-null-type-deep.c index 6f29964d1e..f7de758d8f 100644 --- a/tests/regression/05-lval_ls/23-race-null-type-deep.c +++ b/tests/regression/05-lval_ls/23-race-null-type-deep.c @@ -1,9 +1,15 @@ +// PARAM: --disable sem.unknown_function.invalidate.globals --disable sem.unknown_function.spawn #include -#include + +struct s { + int f; +}; + +extern void magic(struct s *p); void *t_fun(void *arg) { void *top; - fclose(top); // RACE + magic(top); // RACE return NULL; } @@ -21,31 +27,31 @@ int main(void) { void *top; switch (r) { case 0: - feof(NULL); // NORACE + magic(NULL); // NORACE break; case 1: - feof(0); // NORACE + magic(0); // NORACE break; case 2: - feof(zero); // NORACE + magic(zero); // NORACE break; case 3: - feof(1); // RACE + magic(1); // RACE break; case 4: - feof(one); // RACE + magic(one); // RACE break; case 5: - feof(r); // RACE + magic(r); // RACE break; case 6: - feof(null); // NORACE + magic(null); // NORACE break; case 7: - feof(unknown); // RACE + magic(unknown); // RACE break; case 8: - feof(top); // RACE + magic(top); // RACE break; default: break; diff --git a/tests/regression/56-witness/60-tm-inv-transfer-protection.c b/tests/regression/56-witness/60-tm-inv-transfer-protection.c index 3d5bcbc871..07260adbdd 100644 --- a/tests/regression/56-witness/60-tm-inv-transfer-protection.c +++ b/tests/regression/56-witness/60-tm-inv-transfer-protection.c @@ -35,12 +35,12 @@ int main(void) { __goblint_check(g >= 40); __goblint_check(g <= 41); // UNKNOWN (lacks expressivity) pthread_mutex_unlock(&C); - pthread_mutex_unlock(&C); - + pthread_mutex_unlock(&B); + pthread_mutex_lock(&C); __goblint_check(g >= 40); __goblint_check(g <= 42); // UNKNOWN (widen) pthread_mutex_unlock(&C); - + return 0; } diff --git a/tests/regression/56-witness/61-tm-inv-transfer-mine.c b/tests/regression/56-witness/61-tm-inv-transfer-mine.c index 8f912bc2d9..cd8301fb39 100644 --- a/tests/regression/56-witness/61-tm-inv-transfer-mine.c +++ b/tests/regression/56-witness/61-tm-inv-transfer-mine.c @@ -35,12 +35,12 @@ int main(void) { __goblint_check(g >= 40); __goblint_check(g <= 41); pthread_mutex_unlock(&C); - pthread_mutex_unlock(&C); - + pthread_mutex_unlock(&B); + pthread_mutex_lock(&C); - __goblint_check(g >= 40); + __goblint_check(g >= 40); // TODO why? 
   __goblint_check(g <= 42);
   pthread_mutex_unlock(&C);
-
+
   return 0;
 }
\ No newline at end of file
diff --git a/tests/regression/56-witness/62-tm-inv-transfer-protection-witness.c b/tests/regression/56-witness/62-tm-inv-transfer-protection-witness.c
index 7be5bcf53e..68aada7394 100644
--- a/tests/regression/56-witness/62-tm-inv-transfer-protection-witness.c
+++ b/tests/regression/56-witness/62-tm-inv-transfer-protection-witness.c
@@ -35,12 +35,12 @@ int main(void) {
   __goblint_check(g >= 40);
   __goblint_check(g <= 41); // UNKNOWN (lacks expressivity)
   pthread_mutex_unlock(&C);
-  pthread_mutex_unlock(&C);
-
+  pthread_mutex_unlock(&B);
+
   pthread_mutex_lock(&C);
   __goblint_check(g >= 40);
   __goblint_check(g <= 42);
   pthread_mutex_unlock(&C);
-
+
   return 0;
 }
\ No newline at end of file
diff --git a/tests/regression/74-use_after_free/14-alloca-uaf.c b/tests/regression/74-use_after_free/14-alloca-uaf.c
new file mode 100644
index 0000000000..3dc494cb09
--- /dev/null
+++ b/tests/regression/74-use_after_free/14-alloca-uaf.c
@@ -0,0 +1,16 @@
+//PARAM: --set ana.activated[+] useAfterFree
+#include
+#include
+
+int *f() {
+  int *c = alloca(sizeof(int));
+  return c;
+}
+
+int main(int argc, char const *argv[]) {
+  int *ps = alloca(sizeof(int));
+  int *c = f();
+  int a = *ps; //NOWARN
+  int b = *c; //WARN
+  return 0;
+}
diff --git a/tests/regression/75-invalid_dealloc/09-juliet-invalid-dealloc-alloca.c b/tests/regression/75-invalid_dealloc/09-juliet-invalid-dealloc-alloca.c
new file mode 100644
index 0000000000..9a84d1e49a
--- /dev/null
+++ b/tests/regression/75-invalid_dealloc/09-juliet-invalid-dealloc-alloca.c
@@ -0,0 +1,75 @@
+#include
+#include
+
+typedef struct twoIntsStruct {
+  int intOne ;
+  int intTwo ;
+} twoIntsStruct;
+
+void CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54_bad(void) {
+  twoIntsStruct *data;
+  data = (twoIntsStruct *)0;
+  {
+    twoIntsStruct *dataBuffer = __builtin_alloca(800UL);
+    {
+      size_t i;
+      i = 0UL;
+
+      goto ldv_3204;
+    ldv_3203:
+      ;
+
+      (dataBuffer + i)->intOne = 1;
+      (dataBuffer + i)->intTwo = 1;
+
+      i += 1UL;
+    ldv_3204:
+      ;
+
+      if (i <= 99UL)
+        goto ldv_3203;
+      else
+        goto ldv_3205;
+    ldv_3205:
+      ;
+    }
+
+    data = dataBuffer;
+  }
+
+  CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54b_badSink(data);
+  return;
+}
+
+void CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54b_badSink(twoIntsStruct *data) {
+  CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54c_badSink(data);
+  return;
+}
+
+void CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54c_badSink(twoIntsStruct *data) {
+  CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54d_badSink(data);
+  return;
+}
+
+void CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54d_badSink(twoIntsStruct *data) {
+  CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54e_badSink(data);
+  return;
+}
+
+void CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54e_badSink(twoIntsStruct *data) {
+  free((void *)data); //WARN
+  return;
+}
+
+int main(int argc, char **argv) {
+  int __retres;
+  {
+    CWE590_Free_Memory_Not_on_Heap__free_struct_alloca_54_bad();
+    __retres = 0;
+    goto return_label;
+  }
+
+  __retres = 0;
+return_label:
+  return __retres;
+}
diff --git a/tests/regression/77-mem-oob/06-memset-oob.c b/tests/regression/77-mem-oob/06-memset-oob.c
new file mode 100644
index 0000000000..931f7eaa8c
--- /dev/null
+++ b/tests/regression/77-mem-oob/06-memset-oob.c
@@ -0,0 +1,54 @@
+// PARAM: --set ana.activated[+] memOutOfBounds --enable ana.int.interval --disable warn.info
+// TODO: The "--disable warn.info" part is a temporary fix and needs to be removed once the MacOS CI job is fixed
+#include
+#include
+#include
+
+typedef struct s {
+  int a;
+  char b;
+} s;
+
+int main(int argc, char const *argv[]) {
+  int *a = malloc(10 * sizeof(int)); //Size is 40 bytes, assuming a 4-byte int
+
+  memset(a, 0, 40); //NOWARN
+  memset(a, 0, 10 * sizeof(int)); //NOWARN
+  memset(a, 0, 41); //WARN
+  memset(a, 0, 40000000); //WARN
+
+  int d;
+
+  if (argc == 15) {
+    int c = 55;
+    a = &c;
+    memset(a, 0, argv[5]); //WARN
+  } else if (argv[2] == 2) {
+    a = &d;
+  }
+
+  memset(a, 0, 40); //WARN
+
+  int input;
+  scanf("%d", &input);
+  memset(a, 0, input); //WARN
+
+
+
+  int *b = malloc(15 * sizeof(int)); //Size is 60 bytes, assuming a 4-byte int
+  memset(b, 0, 60); //NOWARN
+  b += 1;
+  memset(b, 0, 60); //WARN
+
+
+
+  s *s_ptr = malloc(sizeof(s));
+  memset(s_ptr, 0, sizeof(s)); //NOWARN
+  memset(s_ptr->a, 0, sizeof(s)); //WARN
+  memset(s_ptr->b, 0, sizeof(s)); //WARN
+
+  s_ptr = s_ptr->a;
+  memset(s_ptr, 0, sizeof(s)); //WARN
+
+  return 0;
+}
diff --git a/tests/regression/77-mem-oob/07-memcpy-oob.c b/tests/regression/77-mem-oob/07-memcpy-oob.c
new file mode 100644
index 0000000000..012f92996e
--- /dev/null
+++ b/tests/regression/77-mem-oob/07-memcpy-oob.c
@@ -0,0 +1,53 @@
+// PARAM: --set ana.activated[+] memOutOfBounds --enable ana.int.interval --disable warn.info
+// TODO: The "--disable warn.info" part is a temporary fix and needs to be removed once the MacOS CI job is fixed
+#include
+#include
+
+typedef struct s {
+  int a;
+  char b;
+} s;
+
+int main(int argc, char const *argv[]) {
+  int *a = malloc(10 * sizeof(int)); //Size is 40 bytes, assuming a 4-byte int
+  int *b = malloc(15 * sizeof(int)); //Size is 60 bytes, assuming a 4-byte int
+
+  memcpy(a, b, 40); //NOWARN
+  memcpy(a, b, 10 * sizeof(int)); //NOWARN
+  memcpy(a, b, 41); //WARN
+  memcpy(a, b, 40000000); //WARN
+  memcpy(a, b, 15 * sizeof(int)); //WARN
+
+  int d;
+
+  if (*argv == 42) {
+    a = &d;
+  } else if (*(argv + 5)) {
+    int random = rand();
+    a = &random;
+    memcpy(a, b, 40); //WARN
+  }
+
+  memcpy(a, b, 40); //WARN
+  memcpy(a, b, sizeof(a)); //WARN
+
+  memcpy(b, a, 60); //NOWARN
+  b += 1;
+  memcpy(b, a, 60); //WARN
+
+
+  s *s_ptr = malloc(sizeof(s));
+  memcpy(s_ptr, a, sizeof(s)); //NOWARN
+  memcpy(s_ptr->a, 0, sizeof(s)); //WARN
+  memcpy(s_ptr->b, 0, sizeof(s)); //WARN
+
+  memcpy(s_ptr, a, 40); //WARN
+  memcpy(s_ptr, a, 60); //WARN
+  memcpy(s_ptr, b, 40); //WARN
+  memcpy(s_ptr, b, 60); //WARN
+
+  s_ptr = s_ptr->b;
+  memcpy(s_ptr, a, sizeof(s)); //WARN
+
+  return 0;
+}
diff --git a/tests/regression/77-mem-oob/08-memset-memcpy-array.c b/tests/regression/77-mem-oob/08-memset-memcpy-array.c
new file mode 100644
index 0000000000..f231ba2dc4
--- /dev/null
+++ b/tests/regression/77-mem-oob/08-memset-memcpy-array.c
@@ -0,0 +1,43 @@
+// PARAM: --set ana.activated[+] memOutOfBounds --enable ana.int.interval --disable warn.info
+// TODO: The "--disable warn.info" part is a temporary fix and needs to be removed once the MacOS CI job is fixed
+#include
+#include
+
+int main(int argc, char const *argv[]) {
+  int arr[42]; // Size should be 168 bytes (with 4 byte ints)
+  int *b = arr;
+
+
+  memset(b, 0, 168); //NOWARN
+  memset(b, 0, sizeof(arr)); //NOWARN
+  memset(b, 0, 169); //WARN
+  memset(b, 0, sizeof(arr) + 1); //WARN
+
+  int *c = malloc(sizeof(arr)); // Size should be 168 bytes (with 4 byte ints)
+  memcpy(b, c, 168); //NOWARN
+  memcpy(b, c, sizeof(arr)); //NOWARN
+  memcpy(b, c, 169); //WARN
+  memcpy(b, c, sizeof(arr) + 1); //WARN
+
+  int d;
+
+  if (*argv == 42) {
+    b = &d;
+    memset(b, 0, 168); //WARN
+    memcpy(b, c, 168); //WARN
+  } else if (*(argv + 5)) {
+    int random = rand();
+    b = &random;
+    memset(b, 0, 168); //WARN
+    memcpy(b, c, 168); //WARN
+  }
+
+  memset(b, 0, sizeof(arr)); //WARN
+  memcpy(b, c, sizeof(arr)); //WARN
+  memset(b, 0, sizeof(int)); //NOWARN
+  memcpy(b, c, sizeof(int)); //NOWARN
+  memset(b, 0, sizeof(int) + 1); //WARN
+  memcpy(b, c, sizeof(int) + 1); //WARN
+
+  return 0;
+}
diff --git a/tests/regression/77-mem-oob/09-memset-memcpy-addr-offs.c b/tests/regression/77-mem-oob/09-memset-memcpy-addr-offs.c
new file mode 100644
index 0000000000..725024946e
--- /dev/null
+++ b/tests/regression/77-mem-oob/09-memset-memcpy-addr-offs.c
@@ -0,0 +1,20 @@
+// PARAM: --set ana.activated[+] memOutOfBounds --enable ana.int.interval --disable warn.info
+// TODO: The "--disable warn.info" part is a temporary fix and needs to be removed once the MacOS CI job is fixed
+#include
+#include
+
+int main(int argc, char const *argv[]) {
+  int *a = malloc(10 * sizeof(int)); //Size is 40 bytes, assuming a 4-byte int
+  int *b = malloc(15 * sizeof(int)); //Size is 60 bytes, assuming a 4-byte int
+
+  memset(a, 0, 40); //NOWARN
+  memcpy(a, b, 40); //NOWARN
+
+  a += 3;
+
+  memset(a, 0, 40); //WARN
+  memcpy(a, b, 40); //WARN
+
+  memset(a, 0, 37); //NOWARN
+  memcpy(a, b, 37); //NOWARN
+}
\ No newline at end of file