Skip to content

Commit

Permalink
correct dpc
Browse files Browse the repository at this point in the history
  • Loading branch information
vicjulrin committed Oct 4, 2023
1 parent 8ae3cce commit 190d76b
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 33 deletions.
76 changes: 46 additions & 30 deletions scripts/00_dpc_analyisis/dpc_analyisis.R
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

# Install necessary libraries - packages
packagesPrev<- installed.packages()[,"Package"] # Check and get a list of installed packages in this machine and R version
packagesNeed<- list("magrittr", "terra", "raster", "sf", "fasterize", "pbapply", "this.path", "rjson", "units") # Define the list of required packages to run the script
packagesNeed<- list("magrittr", "terra", "raster", "sf", "fasterize", "pbapply", "this.path", "rjson") # Define the list of required packages to run the script
lapply(packagesNeed, function(x) { if ( ! x %in% packagesPrev ) { install.packages(x, force=T)} }) # Check and install required packages that are not previously installed

# Load libraries
Expand All @@ -23,7 +23,8 @@ lapply(packagesList, library, character.only = TRUE) # Load libraries - package
Sys.getenv("SCRIPT_LOCATION")

# Option 2: Recommended for debugging purposes to be used as a testing environment. This is designed to facilitate script testing and correction
# outputFolder<- {x<- this.path::this.path(); file_prev<- paste0(gsub("/scripts.*", "/output", x), gsub("^.*/scripts", "", x) ); options<- tools::file_path_sans_ext(file_prev) %>% {c(paste0(., ".R"), paste0(., "_R"))}; folder_out<- options %>% {.[file.exists(.)]} %>% {.[which.max(sapply(., function(info) file.info(info)$mtime))]}; folder_final<- list.files(folder_out, full.names = T) %>% {.[which.max(sapply(., function(info) file.info(info)$mtime))]} }
# outputFolder<- {x<- this.path::this.path(); file_prev<- paste0(gsub("/scripts.*", "/output", x), gsub("^.*/scripts", "", x) ); options<- tools::file_path_sans_ext(file_prev) %>% {c(., paste0(., ".R"), paste0(., "_R"))}; folder_out<- options %>% {.[file.exists(.)]} %>% {.[which.max(sapply(., function(info) file.info(info)$mtime))]}; folder_final<- list.files(folder_out, full.names = T) %>% {.[which.max(sapply(., function(info) file.info(info)$mtime))]} }




Expand All @@ -33,9 +34,8 @@ Sys.getenv("SCRIPT_LOCATION")
input <- rjson::fromJSON(file=file.path(outputFolder, "input.json")) # Load input file

# This section adjusts the input values based on specific conditions to rectify and prevent errors in the input paths
input<- lapply(input, function(x) if( grepl("/output/", x) ){
sub(".*/output/", "/output/", x) %>% {gsub("/output.*", ., outputFolder)}}else{x} ) # Ajuste input 1

input<- lapply(input, function(x) if(!is.null(x)){ if( grepl("/", x) ){
sub("/output/.*", "/output", outputFolder) %>% dirname() %>% file.path(x) %>% {gsub("//+", "/", .)} }else{x}} ) # adjust input 1



Expand All @@ -47,20 +47,20 @@ input<- lapply(input, function(x) if( grepl("/output/", x) ){
output<- tryCatch({



units::units_options(set_units_mode = "standard")

group<- input$column_spatial_unit

spatial_unit_data<- read.csv(input$data_spatial_unit) %>%
dplyr::mutate(group_n= paste0(!!!dplyr::syms(group)) %>% {gsub(" ", "_", .)}) %>%
dplyr::select(c(input$column_spatial_unit, input$column_date, input$column_area, "group_n")) %>%
dplyr::filter(!duplicated(group_n)) %>% dplyr::mutate(spatial_unit= "yes_spatial_unit_data")

spatial_unit_data$date<- as.Date(spatial_unit_data$created_date, format = "%Y-%m-%d")


start_date<- {if(input$time_start == "NA"){min(spatial_unit_data$date)}else{input$time_start}} %>% lubridate::floor_date(unit = input$time_interval)
end_date<- {if(input$time_end == "NA"){max(spatial_unit_data$date)}else{input$time_end}} %>% lubridate::ceiling_date(unit = input$time_interval)
dplyr::filter(!duplicated(group_n)) %>% dplyr::mutate(spatial_unit= "yes_spatial_unit", )

input$area_study_area<- input$area_study_area %>% {if( is.null(.) ){sum( spatial_unit_data[,input$column_area] )*2 } else {.}}

start_date<- {if( is.null(input$time_start) ){min(spatial_unit_data$date)}else{input$time_start}} %>% lubridate::floor_date(unit = input$time_interval)
end_date<- {if( is.null(input$time_end) ){max(spatial_unit_data$date)}else{input$time_end}} %>% lubridate::ceiling_date(unit = input$time_interval)


period_time<- paste(timechange:::parse_rounding_unit(input$time_interval), collapse = " " )
Expand All @@ -80,9 +80,9 @@ output<- tryCatch({
data_period<- lapply(new_periods, function(j) dplyr::mutate(spatial_unit_data_v2[i,], period= j)) %>% plyr::rbind.fill()
}) %>% plyr::rbind.fill()

spatial_unit_no_area<- dplyr::select(spatial_unit_area, c("group_n", "period", "area_spatial")) %>%
spatial_unit_no_area<- spatial_unit_area %>%
dplyr::distinct() %>% dplyr::group_by(period) %>%
dplyr::summarize( area_spatial = input$area_study_area - sum(area_spatial)) %>%
dplyr::summarise( !!input$column_area := input$area_study_area - sum( .data[[ input$column_area ]] )) %>%
dplyr::mutate(spatial_unit= "no_spatial_unit")


Expand All @@ -91,15 +91,19 @@ output<- tryCatch({



spatial_units_periods<- read.csv("C:/Users/LENOVO/Documents/pa_ejemplo_victor_v2.csv")


#Define the decades reported for the protected areas creation to start the analysis
decades = unique(spatial_units_periods$period)

#Create the table result
result_dPC = as.data.frame(matrix(NA,ncol = 4, nrow = 0))
colnames(result_dPC) = c("Period", "group_n","PC_out","PC")

dPC = as.data.frame(matrix(NA,ncol = 6, nrow = 0))
colnames(dPC) = c("Period", "group_n", "name", "PC_out","PC" , "dPC")

#




Expand All @@ -125,6 +129,8 @@ output<- tryCatch({
}




#Calculate the PC value
PC = addition/(area_consult*area_consult)
#result_pc= data.frame(Period = i,PC=PC)
Expand All @@ -148,31 +154,41 @@ output<- tryCatch({
add = add+sum(prod)
}


PC_out_id = add/(area_consult*area_consult)
result_PC_out = data.frame(Period = i,group_n = ids[k], PC_out = PC_out_id, PC = PC)
result_dPC = rbind.data.frame(result_dPC,result_PC_out)
}

result_dPC2<- result_dPC
result_dPC2$dPC = (((result_dPC$PC- result_dPC$PC_out))/result_dPC$PC)*100

#Calculate the dPC index by period of time
for (i in decades) {
filter = result_dPC[result_dPC$Period==decades[i],]
order = filter[with(filter, order(-filter$dPC)), ]
if(nrow(order)>=5){
dpc_decade_df = order[5,]
} else{dpc_decade_df = order}
result_dPC2<- result_dPC
result_dPC2$dPC<- (((result_dPC$PC- result_dPC$PC_out))/result_dPC$PC) %>% sapply(function(x) {if(is.na(x)){0}else{x*100}} )

dPC = rbind.data.frame(dPC,dpc_decade_df)
}

}




# Create the empty results table that will accumulate, per period, the
# selected dPC row. It is filled by rbind.data.frame() in the loop below.
dPC = as.data.frame(matrix(NA,ncol = 3, nrow = 0))
# NOTE(review): column names here ("Period", "id_pa", "dPC") do not match the
# columns of the rows appended later from result_dPC2 ("Period", "group_n",
# "PC_out", "PC", "dPC") -- confirm rbind.data.frame() accepts this as intended.
colnames(dPC) = c("Period", "id_pa","dPC")

# For each period, rank the spatial units by descending dPC and append the
# selected row(s) to the dPC results table.
for (i in as.character(decades)) {
filter = result_dPC2[result_dPC2$Period== i,]  # rows of this period; NOTE(review): shadows base::filter
order = filter[with(filter, order(-filter$dPC)), ]  # sorted by dPC, highest first; NOTE(review): shadows base::order
if(nrow(order)>=5){
dpc_decade_df = order[5,]  # keeps ONLY the 5th-ranked row -- TODO confirm intent (5th row vs. top 5 rows)
} else{dpc_decade_df = order}  # fewer than 5 rows: keep them all

######################## MISSING PIECE (translated from Spanish "PEDAZO QUE FALTA") ##################################

dPC = rbind.data.frame(dPC,dpc_decade_df)  # accumulate the selection for this period
}






# Define and export the output values

dPC_template<- dplyr::select(spatial_unit_data,c("group_n", group)) %>% dplyr::distinct()
Expand Down
4 changes: 1 addition & 3 deletions scripts/00_dpc_analyisis/dpc_analyisis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ inputs:
label: Area column name
description: "String referring to the name of the column in the data spatial units file that contains the area of each spatial unit or node that intersects the area of interest. If there are several areas for the same id, the areas will be added. Values must be numeric; otherwise they will be ignored."
type: text
example: "area_spatial"
example: "area_ha"
column_date:
label: Date column name
description: "String referring to the name of the column that contains the date. The date is a mandatory field if you want a temporal analysis by time periods. The date can be entered as 'Year-Month-Day', just year, or just year-month; if given in a different format the row is ignored. If NULL or not specified, a single period is assumed for the entire data set and the following arguments will be ignored."
Expand All @@ -34,12 +34,10 @@ inputs:
label: Start of time interval
description: "Referring to the starting date of the time interval. If this value is null or not provided, the minimum date from the column_date will be automatically taken as the starting date for the analysis. This argument is ignored if column_date=NULL."
type: text
example: NA
time_end:
label: End of time interval
description: "Referring to the ending date of the time interval. If this value is null or not provided, the maximum date from the column_date will be automatically taken as the ending date for the analysis. This argument is ignored if column_date=NULL."
type: text
example: NA
outputs:
dpc_result:
label: Final table with dPC result
Expand Down

0 comments on commit 190d76b

Please sign in to comment.