Skip to content

Commit 21360c7

Browse files
lilao authored and tensorflow-copybara committed
Replace int64 with int64_t and uint64 with uint64_t.
PiperOrigin-RevId: 402347161
1 parent 7934ef4 commit 21360c7

File tree

83 files changed

+339
-333
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

83 files changed

+339
-333
lines changed

tensorflow_serving/batching/batch_scheduler_retrier.h

+3-3
Original file line number | Diff line number | Diff line change
@@ -39,10 +39,10 @@ class BatchSchedulerRetrier : public BatchScheduler<TaskType> {
3939
struct Options {
4040
// The maximum amount of time to spend retrying 'wrapped_->Schedule()'
4141
// calls, in microseconds.
42-
int64 max_time_micros = 10 * 1000 /* 10 milliseconds */;
42+
int64_t max_time_micros = 10 * 1000 /* 10 milliseconds */;
4343

4444
// The amount of time to pause between retry attempts, in microseconds.
45-
int64 retry_delay_micros = 100;
45+
int64_t retry_delay_micros = 100;
4646

4747
// The environment to use for time and sleeping.
4848
Env* env = Env::Default();
@@ -94,7 +94,7 @@ Status BatchSchedulerRetrier<TaskType>::Schedule(
9494
std::unique_ptr<TaskType>* task) {
9595
Status status;
9696

97-
const uint64 start_time_micros = options_.env->NowMicros();
97+
const uint64_t start_time_micros = options_.env->NowMicros();
9898
for (;;) {
9999
status = wrapped_->Schedule(task);
100100
if (status.code() != error::UNAVAILABLE) {

tensorflow_serving/batching/batching_session.cc

+10-10
Original file line number | Diff line number | Diff line change
@@ -64,8 +64,8 @@ string TensorSignatureDebugString(const TensorSignature& signature) {
6464
}
6565

6666
struct HashTensorSignature {
67-
uint64 operator()(const TensorSignature& signature) const {
68-
uint64 hash = 0xDECAFCAFFE /* seed */;
67+
uint64_t operator()(const TensorSignature& signature) const {
68+
uint64_t hash = 0xDECAFCAFFE /* seed */;
6969
for (const string& input_tensor : signature.input_tensors) {
7070
hash = HashCombine(hash, std::hash<string>()(input_tensor));
7171
}
@@ -585,7 +585,7 @@ Status BatchingSession::SplitOutputTensors(
585585
batch->num_tasks());
586586
}
587587

588-
std::vector<int64> task_sizes_plus_optional_padding;
588+
std::vector<int64_t> task_sizes_plus_optional_padding;
589589
task_sizes_plus_optional_padding.reserve(batch->num_tasks());
590590
for (int i = 0; i < batch->num_tasks(); ++i) {
591591
task_sizes_plus_optional_padding.push_back(batch->task(i).zeroth_dim_size);
@@ -709,7 +709,7 @@ void BatchingSession::ProcessBatch(
709709
return;
710710
}
711711

712-
const uint64 dequeue_time_micros = EnvTime::NowMicros();
712+
const uint64_t dequeue_time_micros = EnvTime::NowMicros();
713713

714714
// Regardless of the outcome, we need to propagate the status to the
715715
// individual tasks and signal that they are done. We use MakeCleanup() to
@@ -732,16 +732,16 @@ void BatchingSession::ProcessBatch(
732732
// queue time alone, and find the latest task deadline which we'll use for the
733733
// overall batch.
734734
bool all_tasks_timeout_exceeded = true;
735-
uint64 batch_deadline_micros = 0;
735+
uint64_t batch_deadline_micros = 0;
736736
for (int i = 0; i < batch->num_tasks(); ++i) {
737737
const BatchingSessionTask& task = batch->task(i);
738738
// If the caller doesn't populate RunOptions, the timeout is 0 by default.
739739
// Interpret that as "no timeout" i.e. infinity.
740-
const int64 task_timeout_micros =
740+
const int64_t task_timeout_micros =
741741
task.run_options.timeout_in_ms() <= 0
742742
? INT_MAX
743743
: task.run_options.timeout_in_ms() * 1000;
744-
const uint64 task_deadline_micros =
744+
const uint64_t task_deadline_micros =
745745
task.enqueue_time_micros + task_timeout_micros;
746746
if (task_deadline_micros > dequeue_time_micros) {
747747
all_tasks_timeout_exceeded = false;
@@ -809,7 +809,7 @@ Status SplitInputTask(
809809
int open_batch_remaining_slot, int max_batch_size,
810810
std::vector<std::unique_ptr<BatchingSessionTask>>* output_tasks) {
811811
BatchingSessionTask& input_task = *(*input_task_ptr);
812-
const int64 input_task_size = input_task.size();
812+
const int64_t input_task_size = input_task.size();
813813

814814
DCHECK_GT(input_task_size, 0);
815815

@@ -884,9 +884,9 @@ Status SplitInputTask(
884884
const internal::InputSplitMetadata input_split_metadata(
885885
input_task_size, open_batch_remaining_slot, max_batch_size);
886886

887-
// Creates an array of int64 from an array of int, since `tensor::Split`
887+
// Creates an array of int64_t from an array of int, since `tensor::Split`
888888
// requires an array of int64.
889-
const absl::FixedArray<int64> output_task_sizes(
889+
const absl::FixedArray<int64_t> output_task_sizes(
890890
input_split_metadata.task_sizes().begin(),
891891
input_split_metadata.task_sizes().end());
892892
const int num_batches = output_task_sizes.size();

tensorflow_serving/batching/batching_session.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -172,7 +172,7 @@ struct BatchingSessionTask : public BatchTask {
172172
static std::string Name() { return "batching_session"; }
173173

174174
// Fields populated when a task is received.
175-
uint64 enqueue_time_micros;
175+
uint64_t enqueue_time_micros;
176176
RunOptions run_options;
177177
size_t zeroth_dim_size;
178178
const std::vector<std::pair<string, Tensor>>* inputs;

tensorflow_serving/batching/batching_util.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -33,8 +33,8 @@ namespace serving {
3333
// It requires padding to be an array of elements that have fields
3434
// "first" and "second".
3535
struct OneDimPadding {
36-
int64 first; // pad before
37-
int64 second; // pad after
36+
int64_t first; // pad before
37+
int64_t second; // pad after
3838
};
3939

4040
// Constructs array of paddings, where:

tensorflow_serving/batching/streaming_batch_scheduler.cc

+3-3
Original file line number | Diff line number | Diff line change
@@ -27,14 +27,14 @@ namespace internal {
2727
// SingleTaskScheduler
2828

2929
SingleTaskScheduler::SingleTaskScheduler(Env* env, string thread_name,
30-
uint64 no_tasks_wait_time_micros)
30+
uint64_t no_tasks_wait_time_micros)
3131
: env_(env),
3232
thread_name_(std::move(thread_name)),
3333
no_tasks_wait_time_micros_(no_tasks_wait_time_micros) {}
3434

3535
SingleTaskScheduler::~SingleTaskScheduler() { stop_.Notify(); }
3636

37-
void SingleTaskScheduler::Schedule(uint64 time_micros,
37+
void SingleTaskScheduler::Schedule(uint64_t time_micros,
3838
std::function<void()> closure) {
3939
DCHECK_GE(time_micros, last_task_time_);
4040
last_task_time_ = time_micros;
@@ -56,7 +56,7 @@ void SingleTaskScheduler::ThreadLogic() {
5656
for (;;) {
5757
// Sleep until the time specified in the current task, if any.
5858
if (current_task) {
59-
const uint64 now = env_->NowMicros();
59+
const uint64_t now = env_->NowMicros();
6060
if (current_task->time_micros > now) {
6161
env_->SleepForMicroseconds(current_task->time_micros - now);
6262
}

tensorflow_serving/batching/streaming_batch_scheduler.h

+12-12
Original file line number | Diff line number | Diff line change
@@ -135,7 +135,7 @@ class StreamingBatchScheduler : public BatchScheduler<TaskType> {
135135
//
136136
// A negative value means that no timeout will be enforced. This setting is
137137
// useful in some test code.
138-
int64 batch_timeout_micros = 0;
138+
int64_t batch_timeout_micros = 0;
139139

140140
// The name to use for the pool of batch threads.
141141
string thread_pool_name = "batch_threads";
@@ -151,7 +151,7 @@ class StreamingBatchScheduler : public BatchScheduler<TaskType> {
151151

152152
// How long SingleTaskScheduler should wait if there are no scheduled tasks,
153153
// in microseconds.
154-
uint64 no_tasks_wait_time_micros = 1000; // 1 millisecond
154+
uint64_t no_tasks_wait_time_micros = 1000; // 1 millisecond
155155
};
156156
static Status Create(
157157
const Options& options,
@@ -188,7 +188,7 @@ class StreamingBatchScheduler : public BatchScheduler<TaskType> {
188188
// Takes a snapshot of 'open_batch_num_', and schedules an event with
189189
// 'batch_closer_' to close it at time 'close_time_micros' if it is still open
190190
// at that time.
191-
void ScheduleCloseOfCurrentOpenBatch(uint64 close_time_micros)
191+
void ScheduleCloseOfCurrentOpenBatch(uint64_t close_time_micros)
192192
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
193193

194194
const Options options_;
@@ -209,7 +209,7 @@ class StreamingBatchScheduler : public BatchScheduler<TaskType> {
209209

210210
// The sequence number of 'open_batch_'. Incremented each time 'open_batch_'
211211
// is assigned to a new (non-null) batch object.
212-
int64 open_batch_num_ TF_GUARDED_BY(mu_) = 0;
212+
int64_t open_batch_num_ TF_GUARDED_BY(mu_) = 0;
213213

214214
// The number of batches "in progress", i.e. batches that have been started
215215
// but for which the process-batch callback hasn't finished. Note that this
@@ -246,7 +246,7 @@ namespace internal {
246246
class SingleTaskScheduler {
247247
public:
248248
SingleTaskScheduler(Env* env, string thread_name,
249-
uint64 no_tasks_wait_time_micros);
249+
uint64_t no_tasks_wait_time_micros);
250250

251251
// Blocks until the currently-set closure (if any) runs.
252252
~SingleTaskScheduler();
@@ -256,7 +256,7 @@ class SingleTaskScheduler {
256256
// cancels any closures provided in them (if they haven't already been run).
257257
//
258258
// IMPORTANT: 'time_micros' must be monotonically non-decreasing across calls.
259-
void Schedule(uint64 time_micros, std::function<void()> closure);
259+
void Schedule(uint64_t time_micros, std::function<void()> closure);
260260

261261
private:
262262
// The code executed in 'thread_'. Looks for updated tasks, and executes them
@@ -271,7 +271,7 @@ class SingleTaskScheduler {
271271

272272
// The arguments to Schedule().
273273
struct Task {
274-
uint64 time_micros;
274+
uint64_t time_micros;
275275
std::function<void()> closure;
276276
};
277277

@@ -280,7 +280,7 @@ class SingleTaskScheduler {
280280

281281
// The time parameter passed in the most recent Schedule() invocation.
282282
// Used to enforce monotonicity.
283-
uint64 last_task_time_ = 0;
283+
uint64_t last_task_time_ = 0;
284284

285285
// A notification for stopping the thread, during destruction.
286286
Notification stop_;
@@ -292,7 +292,7 @@ class SingleTaskScheduler {
292292
std::unique_ptr<Thread> thread_;
293293

294294
// How long to wait if there are no scheduled tasks, in microseconds.
295-
const uint64 no_tasks_wait_time_micros_;
295+
const uint64_t no_tasks_wait_time_micros_;
296296

297297
TF_DISALLOW_COPY_AND_ASSIGN(SingleTaskScheduler);
298298
};
@@ -363,7 +363,7 @@ Status StreamingBatchScheduler<TaskType>::Schedule(
363363
// If we are about to add the first task to a batch, schedule the batch to
364364
// be closed after the timeout.
365365
if (options_.batch_timeout_micros > 0 && open_batch_->empty()) {
366-
const uint64 batch_deadline =
366+
const uint64_t batch_deadline =
367367
options_.env->NowMicros() + options_.batch_timeout_micros;
368368
ScheduleCloseOfCurrentOpenBatch(batch_deadline);
369369
}
@@ -433,13 +433,13 @@ void StreamingBatchScheduler<TaskType>::StartNewBatch() {
433433

434434
template <typename TaskType>
435435
void StreamingBatchScheduler<TaskType>::ScheduleCloseOfCurrentOpenBatch(
436-
uint64 close_time_micros) {
436+
uint64_t close_time_micros) {
437437
if (batch_closer_ == nullptr) {
438438
batch_closer_.reset(new internal::SingleTaskScheduler(
439439
options_.env, "batch_closer", options_.no_tasks_wait_time_micros));
440440
}
441441

442-
const int64 batch_num_to_close = open_batch_num_;
442+
const int64_t batch_num_to_close = open_batch_num_;
443443
batch_closer_->Schedule(close_time_micros, [this, batch_num_to_close] {
444444
{
445445
mutex_lock l(this->mu_);

tensorflow_serving/core/aspired_versions_manager.cc

+8-8
Original file line number | Diff line number | Diff line change
@@ -95,9 +95,9 @@ Status ValidateAspiredVersions(
9595
}
9696

9797
// Returns the set of version numbers in 'versions'.
98-
std::set<int64> GetVersionNumbers(
98+
std::set<int64_t> GetVersionNumbers(
9999
const std::vector<ServableData<std::unique_ptr<Loader>>>& versions) {
100-
std::set<int64> version_numbers;
100+
std::set<int64_t> version_numbers;
101101
for (const auto& version : versions) {
102102
version_numbers.insert(version.id().version);
103103
}
@@ -176,7 +176,7 @@ Status AspiredVersionsManager::Create(
176176
}
177177

178178
AspiredVersionsManager::AspiredVersionsManager(
179-
int64 manage_state_interval_micros, Env* env,
179+
int64_t manage_state_interval_micros, Env* env,
180180
std::unique_ptr<AspiredVersionPolicy> aspired_version_policy,
181181
std::unique_ptr<BasicManager> basic_manager)
182182
: aspired_version_policy_(std::move(aspired_version_policy)),
@@ -256,14 +256,14 @@ void AspiredVersionsManager::ProcessAspiredVersionsRequest(
256256
VLOG(1) << "Processing aspired versions request: " << servable_name << ": "
257257
<< ServableVersionsDebugString(versions);
258258

259-
const std::set<int64> next_aspired_versions = GetVersionNumbers(versions);
259+
const std::set<int64_t> next_aspired_versions = GetVersionNumbers(versions);
260260

261261
// We gather all the servables with the servable_name and
262262
// 1. Add the current aspired version numbers to a set,
263263
// 2. Set the aspired bool to false for all current servable harnesses which
264264
// are not aspired.
265-
std::set<int64> current_aspired_versions;
266-
std::set<int64> current_aspired_versions_with_error;
265+
std::set<int64_t> current_aspired_versions;
266+
std::set<int64_t> current_aspired_versions_with_error;
267267
const std::vector<ServableStateSnapshot<Aspired>> state_snapshots =
268268
basic_manager_->GetManagedServableStateSnapshots<Aspired>(
269269
string(servable_name));
@@ -287,7 +287,7 @@ void AspiredVersionsManager::ProcessAspiredVersionsRequest(
287287
// We do a set_difference (A - B), on the next aspired versions and the
288288
// current aspired versions to find the version numbers which need to be
289289
// added the harness map.
290-
std::set<int64> additions;
290+
std::set<int64_t> additions;
291291
std::set_difference(
292292
next_aspired_versions.begin(), next_aspired_versions.end(),
293293
current_aspired_versions.begin(), current_aspired_versions.end(),
@@ -338,7 +338,7 @@ bool AspiredVersionsManager::ContainsAnyReaspiredVersions(
338338
const std::vector<ServableStateSnapshot<Aspired>> state_snapshots =
339339
basic_manager_->GetManagedServableStateSnapshots<Aspired>(
340340
string(servable_name));
341-
const std::set<int64> version_numbers = GetVersionNumbers(versions);
341+
const std::set<int64_t> version_numbers = GetVersionNumbers(versions);
342342
for (const ServableStateSnapshot<Aspired>& state_snapshot : state_snapshots) {
343343
if (!state_snapshot.additional_state->is_aspired &&
344344
version_numbers.find(state_snapshot.id.version) !=

tensorflow_serving/core/aspired_versions_manager.h

+3-3
Original file line number | Diff line number | Diff line change
@@ -97,7 +97,7 @@ class AspiredVersionsManager : public Manager,
9797
/// The periodicity, in microseconds, of the thread which manages the state
9898
/// of the servables. Default: 100 milliseconds. If this is set less than or
9999
/// equal to 0, we don't run this thread at all.
100-
int64 manage_state_interval_micros = 100 * 1000;
100+
int64_t manage_state_interval_micros = 100 * 1000;
101101

102102
/// EventBus to publish servable state changes. This is optional, if unset,
103103
/// we don't publish.
@@ -125,7 +125,7 @@ class AspiredVersionsManager : public Manager,
125125
/// The interval, in microseconds, between each servable load retry. If set
126126
/// negative, we don't wait.
127127
/// Default: 1 minute.
128-
int64 load_retry_interval_micros = 1LL * 60 * 1000 * 1000;
128+
int64_t load_retry_interval_micros = 1LL * 60 * 1000 * 1000;
129129

130130
// If true, and there are not multiple load threads, filesystem caches will
131131
// be flushed after each servable is loaded. (Cache flush is skipped when
@@ -218,7 +218,7 @@ class AspiredVersionsManager : public Manager,
218218
AspiredVersionsManager* manager);
219219

220220
AspiredVersionsManager(
221-
int64 manage_state_interval_micros, Env* env,
221+
int64_t manage_state_interval_micros, Env* env,
222222
std::unique_ptr<AspiredVersionPolicy> aspired_version_policy,
223223
std::unique_ptr<BasicManager> basic_manager);
224224

0 commit comments

Comments
 (0)