
Commit adfa4ef

Adapted job to work with work-centers

1 parent 6a77617 commit adfa4ef

5 files changed: +100 -41 lines

diploma_thesis/environment/job.py (+90 -38)
@@ -2,21 +2,31 @@
 import torch
 
 from dataclasses import dataclass, field
+from enum import Enum
 
 
 @dataclass
 class Job:
+
+    class ReductionStrategy(Enum):
+        """
+        Job doesn't know in advance on which machine it will be processed inside work-center. ReductionStrategy
+        defines the way, how the expected processing time on the work-center is calculated
+        """
+        mean = 0
+        min = 1
+        max = 2
+
     # Id of the job
     id: int
-
-    # The sequence of indices, which job must visit in order to be complete
+    # The sequence of work-centers, which job must visit in order to be complete
     step_idx: torch.LongTensor
-    # The processing time of the job on each machine
+    # The processing time of the job in workcenter depending on the machine it was scheduled
     processing_times: torch.LongTensor
-
-    # The index of the current operation to complete
-    current_operation_idx: int = 0
-
+    # The index of the workcenter that the job is currently visiting
+    current_step_idx: int = 0
+    # The index of the machine in the work-center where the job is being processed
+    current_machine_idx: int = 0
     # The creation time of the job
     created_at: torch.LongTensor = 0
     # The time of the job completion
@@ -25,85 +35,100 @@ class Job:
     due_at: torch.LongTensor = 0
     # The time, when each operation arrives to the specified machine
     arrived_at: torch.LongTensor = field(default_factory=torch.LongTensor)
-    # The list of the times, when operation was selected for processing on the machine
+    # The list of the times, when operation was selected for processing on the workcenter
     started_at: torch.LongTensor = field(default_factory=torch.LongTensor)
     # Slack, i.e. the amount of time, that the job can be postponed
     # It is calculated as the due_at - current_time - remaining_processing time, and it is recorded at the arrival
     # of the job on the machine
     slack: torch.LongTensor = field(default_factory=torch.LongTensor)
 
     def __post_init__(self):
-        self.arrived_at = torch.zeros_like(self.processing_times)
-        self.started_at = torch.zeros_like(self.processing_times)
-        self.stack = torch.zeros_like(self.processing_times)
+        self.arrived_at = torch.zeros_like(self.step_idx)
+        self.started_at = torch.zeros_like(self.step_idx)
+        self.slack = torch.zeros_like(self.step_idx)
 
     @property
-    def processing_time_moments(self):
+    def processing_time_moments(self, reduction_strategy: ReductionStrategy = ReductionStrategy.mean):
         """
         Returns: The expectation and variance of processing times
         """
-        return torch.mean(self.processing_times), torch.std(self.processing_times)
+        processing_times = self.expected_processing_times(self.processing_times.float(), reduction_strategy)
+
+        return torch.mean(processing_times), torch.std(processing_times)
 
     @property
-    def current_operation_processing_time(self):
+    def current_operation_processing_time_on_machine(self):
         """
-        Returns: Returns the processing time of the current operation
+        Returns: Returns the processing time of the current operation in machine
        """
-        return self.processing_times[self.current_operation_idx]
+        return self.processing_times[self.current_step_idx][self.current_machine_idx]
 
     @property
-    def remaining_processing_time(self):
+    def current_operation_processing_time_in_workcenter(self):
+        """
+        Returns: Returns the processing time of the current operation in workcenter
+        """
+        return self.processing_times[self.current_step_idx]
+
+    @property
+    def remaining_processing_time(self, strategy: ReductionStrategy = ReductionStrategy.mean):
         """
         Returns: The total processing time of the remaining operations
         """
-        return self.processing_times[self.current_operation_idx:].sum()
+
+        # Since we don't know, to which machine inside work-center the job will be dispatched next, we
+        # approximate it with the average
+        expected_processing_time = self.processing_times[self.current_step_idx:]
+        expected_processing_time = self.expected_processing_times(expected_processing_time.float(), strategy)
+
+        return expected_processing_time.sum()
 
     @property
     def remaining_operations_count(self):
         """
         Returns: The number of remaining operations
         """
-        return len(self.processing_times) - self.current_operation_idx
+        return self.processing_times.shape[0] - self.current_step_idx
 
     @property
-    def next_machine_idx(self):
+    def next_work_center_idx(self):
         """
-        Returns: The index of the next machine to visit or None if there is no next machine
+        Returns: The index of the work-center to visit or None if the job is completed
        """
-        next_idx = self.current_operation_idx + 1
+        next_idx = self.current_step_idx + 1
 
         if next_idx >= len(self.step_idx):
             return None
 
         return self.step_idx[next_idx]
 
     @property
-    def next_operation_processing_time(self):
+    def next_operation_processing_time(self, strategy: ReductionStrategy = ReductionStrategy.mean):
         """
         Returns: The processing time of the next operation
         """
-        next_idx = self.current_operation_idx + 1
+        next_idx = self.current_step_idx + 1
 
         if next_idx >= len(self.step_idx):
             return 0
 
-        return self.processing_times[next_idx]
+        pt = self.processing_times[next_idx]
+
+        return self.expected_processing_times(pt.float(), strategy)
 
     @property
     def slack_upon_arrival(self):
         """
         Returns: The slack upon arrival of the job on the machine
         """
-        return self.slack[self.current_operation_idx]
+        return self.slack[self.current_step_idx]
 
     def slack_upon_now(self, now: int):
         """
-
         Args:
             now: Current time
 
-        Returns: The slack upon arrival of the job on the machine at now
-
+        Returns: The slack upon now of the job on the machine
         """
         return self.due_at - now - self.remaining_processing_time
 
@@ -118,26 +143,40 @@ def time_until_due(self, now: int):
 
     def current_operation_waiting_time(self, now: int):
         """
-
         Args:
             now: Current time
 
         Returns: The time that the current operation has been waiting for processing on current machine
-
         """
-        return now - self.arrived_at[self.current_operation_idx]
+        return now - self.arrived_at[self.current_step_idx]
 
     def operation_completion_rate(self):
         """
-        Returns: The completion rate of the job based on the number of completed operations
+        The completion rate of the job based on the number of completed operations
        """
         return self.remaining_operations_count / len(self.step_idx)
 
     def time_completion_rate(self, now: int):
         """
-        Returns: The completion rate of the job based on the remaining processing time
+        The completion rate of the job based on the remaining processing time
        """
         return self.remaining_processing_time / self.processing_times.sum()
+    def with_next_step(self):
+        """
+        Advances the job to the next work-center
+        """
+        self.current_step_idx += 1
+        self.current_machine_idx = -1
+
+        return self
+
+    def with_assigned_machine(self, machine_idx: int):
+        """
+        Advances the job to the next machine
+        """
+        self.current_machine_idx = machine_idx
+
+        return self
 
     def with_arrival(self, now: int):
         """
@@ -148,7 +187,6 @@ def with_arrival(self, now: int):
 
         Returns: Reference to self
         """
-        # TODO: Where the move to the next machine is updated?
         return self.with_current_operation_arrival_time(now).with_current_operation_slack_upon_arrival()
 
     def with_current_operation_arrival_time(self, now: int):
@@ -160,7 +198,7 @@ def with_current_operation_arrival_time(self, now: int):
 
         Returns: Reference to self
         """
-        self.arrived_at[self.current_operation_idx] = now
+        self.arrived_at[self.current_step_idx] = now
 
         return self
 
@@ -173,7 +211,7 @@ def with_current_operation_start_time(self, now: int):
 
         Returns: Reference to self
         """
-        self.started_at[self.current_operation_idx] = now
+        self.started_at[self.current_step_idx] = now
 
         return self
 
@@ -186,7 +224,7 @@ def with_current_operation_slack_upon_arrival(self):
 
         Returns: Reference to self
         """
-        self.slack[self.current_operation_idx] = self.slack_upon_now(self.arrived_at[self.current_operation_idx])
+        self.slack[self.current_step_idx] = self.slack_upon_now(self.arrived_at[self.current_step_idx])
 
         return self
 
@@ -220,3 +258,17 @@ def with_completion_time(self, time: int):
         self.completed_at = time
 
         return self
+
+    @staticmethod
+    def expected_processing_times(processing_times, strategy: ReductionStrategy):
+        processing_times = torch.atleast_2d(processing_times)
+
+        match strategy:
+            case Job.ReductionStrategy.mean:
+                return processing_times.mean(axis=1)
+            case Job.ReductionStrategy.min:
+                return processing_times.min(axis=1)
+            case Job.ReductionStrategy.max:
+                return processing_times.max(axis=1)
+            case _:
+                raise ValueError(f"Unknown reduction strategy {strategy}")
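
The job.py changes above center on the nested ReductionStrategy enum and the expected_processing_times helper: processing_times is now a (work-center, machine) matrix, and the expected time per work-center is obtained by reducing over the machine dimension. The following minimal, self-contained sketch illustrates that idea; it is not the repository's code, and it uses torch's min(dim=...).values / max(dim=...).values where the committed min(axis=1) / max(axis=1) would return a (values, indices) pair.

import torch
from enum import Enum


class ReductionStrategy(Enum):
    mean = 0
    min = 1
    max = 2


def expected_processing_times(processing_times: torch.Tensor, strategy: ReductionStrategy) -> torch.Tensor:
    # One row per work-center, one column per candidate machine inside that work-center
    processing_times = torch.atleast_2d(processing_times.float())

    match strategy:
        case ReductionStrategy.mean:
            return processing_times.mean(dim=1)
        case ReductionStrategy.min:
            # min/max along a dimension return (values, indices); keep only the values
            return processing_times.min(dim=1).values
        case ReductionStrategy.max:
            return processing_times.max(dim=1).values
        case _:
            raise ValueError(f"Unknown reduction strategy {strategy}")


# Example: 3 remaining work-centers, 2 candidate machines each
pt = torch.tensor([[4, 6], [10, 2], [5, 5]])
print(expected_processing_times(pt, ReductionStrategy.mean))        # tensor([5., 6., 5.])
print(expected_processing_times(pt, ReductionStrategy.min))         # tensor([4., 2., 5.])
print(expected_processing_times(pt, ReductionStrategy.mean).sum())  # analogue of remaining_processing_time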

diploma_thesis/environment/problem.py (+1 -1)
@@ -79,7 +79,7 @@ def add_cli_arguments(parser: argparse.ArgumentParser):
             "--tightness-factor",
             help="Tightness factor",
             type=float,
-            default=1.0
+            default=2.0
         )
 
         sub_parser.add_argument(
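
For context on the --tightness-factor default: in job-shop due-date generation, a tightness factor commonly scales a job's total processing time into its due date (the TWK rule). Whether problem.py applies exactly that rule is an assumption, since the rule itself is not part of this diff; the sketch below only shows why a larger factor yields looser due dates.

# Hypothetical TWK-style due-date assignment (illustrative, not taken from problem.py)
def due_date(created_at: float, total_processing_time: float, tightness_factor: float) -> float:
    return created_at + tightness_factor * total_processing_time

print(due_date(0, 50, 1.0))  # 50.0  -> tight due date under the old default
print(due_date(0, 50, 2.0))  # 100.0 -> looser due date under the new default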

diploma_thesis/environment/shopfloor.py (+5 -2)
@@ -62,16 +62,19 @@ def __init__(
         self.state = State()
         self.history = History()
 
+    def simulate(self):
         self.assign_initial_jobs()
 
         self.environment.process(self.dispatch_jobs())
 
+    # Redefine generation of Work-Center paths
+
     def assign_initial_jobs(self):
         step_idx = torch.arange(self.description.workcenter_count, dtype=torch.long)
 
         for work_center in self.work_centers:
-            for machine in work_center.context.machines:
-                step_idx = torch.randperm(step_idx, generator=self.generator)
+            for _ in work_center.context.machines:
+                step_idx = step_idx[torch.randperm(step_idx.shape[0], generator=self.generator)]
 
                 job = self.__sample_job__(step_idx,
                                           initial_work_center_idx=work_center.state.idx,
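
The corrected shuffle works because torch.randperm expects an integer length, not a tensor: the work-center visiting order is randomized by indexing step_idx with a random permutation of its own positions. A standalone sketch of the pattern, with an illustrative seed and size:

import torch

generator = torch.Generator().manual_seed(42)
step_idx = torch.arange(5, dtype=torch.long)   # work-center indices [0, 1, 2, 3, 4]

# Draw a permutation of positions 0..4, then reorder the tensor with it
perm = torch.randperm(step_idx.shape[0], generator=generator)
shuffled = step_idx[perm]

print(shuffled)  # e.g. tensor([2, 0, 1, 3, 4]); the exact order depends on the seed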

diploma_thesis/environment/work_center.py (+2)
@@ -63,6 +63,8 @@ def connect(self, machines: List['Machine'], work_centers: List['WorkCenter'], s
         """
         self.context.with_info(machines, work_centers, shopfloor)
 
+        print(self.context)
+
         self.environment.process(self.dispatch())
 
     def dispatch(self):

diploma_thesis/workflows/debug.py (+2)
@@ -44,6 +44,8 @@ def run(self):
         for machine in machines:
             machine.connect(machines, work_centers, shopfloor)
 
+        shopfloor.simulate()
+
         self.configuration.environment.run()
 
     def __make_working_units__(self) -> Tuple[List[WorkCenter], List[Machine]]:
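
The added shopfloor.simulate() call registers the dispatching processes before the environment is run. Assuming the environment is a simpy.Environment, which the process()/run() calls suggest, the required ordering looks like this minimal sketch:

import simpy


def dispatch_jobs(environment: simpy.Environment):
    # Stand-in for the shopfloor's dispatching process
    while True:
        print(f"dispatching at t={environment.now}")
        yield environment.timeout(10)


environment = simpy.Environment()
environment.process(dispatch_jobs(environment))  # analogous to shopfloor.simulate()
environment.run(until=30)                        # analogous to self.configuration.environment.run()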
