-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Provide part of the dummy implementation
- Loading branch information
Showing 10 changed files with 168 additions and 58 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
tensorflow |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,41 +1,35 @@ | ||
#pragma once

// Aggregated TensorFlow includes for the tensorflow_cpy shim layer.
// Sorted alphabetically and deduplicated: the previous text repeated
// several headers (device_id.h, device_mgr.h, gpu_init.h,
// gpu_bfc_allocator.h, ...) in both an unsorted and a sorted group.
#include "./tensorflow/cc/saved_model/loader.h"
#include "./tensorflow/cc/saved_model/tag_constants.h"
#include "./tensorflow/core/common_runtime/device/device_id.h"
#include "./tensorflow/core/common_runtime/device/device_id_utils.h"
#include "./tensorflow/core/common_runtime/device/device_mem_allocator.h"
#include "./tensorflow/core/common_runtime/device_mgr.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_init.h"
#include "./tensorflow/core/framework/allocator.h"
#include "./tensorflow/core/framework/device.h"
#include "./tensorflow/core/framework/op.h"
#include "./tensorflow/core/framework/tensor.h"
#include "./tensorflow/core/framework/tensor_shape.h"
#include "./tensorflow/core/framework/types.h"
#include "./tensorflow/core/platform/env.h"
#include "./tensorflow/core/platform/logging.h"
#include "./tensorflow/core/platform/macros.h"
#include "./tensorflow/core/platform/refcount.h"
#include "./tensorflow/core/platform/status.h"
#include "./tensorflow/core/platform/statusor.h"
#include "./tensorflow/core/platform/types.h"
#include "./tensorflow/core/public/session.h"
#include "./tensorflow/core/public/session_options.h"
#include "./tensorflow/core/util/stream_executor_util.h"
#include "./tensorflow/stream_executor/device_memory.h"
#include "./tensorflow/stream_executor/platform.h"
#include "./tensorflow/stream_executor/stream_executor_pimpl.h"

// Re-export every ::tensorflow symbol under tensorflow_cpy::tensorflow so
// client code can refer to the copied namespace uniformly. (The flattened
// diff showed this inner namespace block twice; one copy suffices.)
// NOTE(review): a using-directive in a header pulls ALL ::tensorflow names
// into tensorflow_cpy::tensorflow — presumably intentional for the shim,
// but confirm before narrowing it to explicit using-declarations.
namespace tensorflow_cpy {
namespace tensorflow {
using namespace ::tensorflow;  // NOLINT(build/namespaces)
}  // namespace tensorflow
}  // namespace tensorflow_cpy
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,104 @@ | ||
#include <tensorflow_cpy/tensorflow.h>

// Dummy implementations of the TensorFlow symbols the shim links against.
// Every function is a deliberate no-op that reports success; no real device,
// registry, or session work happens here. Parameters are intentionally
// unused, hence the pragma below.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"

namespace tensorflow_cpy {
namespace stream_executor {

// Reports no real usage information; always claims success.
bool StreamExecutor::DeviceMemoryUsage(long*, long*) const { return true; }

// BUG FIX: the previous code did `return DeviceDescription::CreateDummy();`
// through a const-reference return type — if CreateDummy() returns by value,
// the returned reference dangles as soon as the caller touches it. Binding
// the result to a function-local static const reference extends a temporary's
// lifetime for the whole program, and is equally correct if CreateDummy()
// already returns a long-lived reference.
const DeviceDescription& StreamExecutor::GetDeviceDescription() const {
  static const DeviceDescription& dummy = DeviceDescription::CreateDummy();
  return dummy;
}

// Deallocation is a no-op for the dummy executor.
void StreamExecutor::Deallocate(DeviceMemoryBase*) {}

// Synchronous transfers/memset: pretend success without moving any bytes.
port::Status StreamExecutor::SynchronousMemcpyD2H(DeviceMemoryBase const&, long, void*) {
  return port::Status::OK();
}
port::Status StreamExecutor::SynchronousMemcpyH2D(void const*, long, DeviceMemoryBase*) {
  return port::Status::OK();
}
port::Status StreamExecutor::SynchronousMemSet(DeviceMemoryBase*, int, unsigned long) {
  return port::Status::OK();
}

// Unified memory is unsupported: allocation signals failure via nullptr and
// deallocation is a matching no-op.
void* StreamExecutor::UnifiedMemoryAllocate(unsigned long) { return nullptr; }
void StreamExecutor::UnifiedMemoryDeallocate(void*) {}

}  // namespace stream_executor

namespace tensorflow {
namespace se = stream_executor;

SessionOptions::SessionOptions() {}

namespace internal {
// LogMessageFatal::LogMessageFatal(char const*, int) {}
// LogMessageFatal::~LogMessageFatal() {}
}  // namespace internal

namespace core {
// RefCountDeleter::operator()(RefCounted const*) const{}
// RefCounted::RefCounted() {}
// RefCounted::Unref() const {}
}  // namespace core

// NOTE(review): returning nullptr from Env::Default() / OpRegistry::Global() /
// GPUMachineManager() means any caller that dereferences the result crashes —
// confirm no user of this shim needs a real environment, registry or platform.
Env* Env::Default() { return nullptr; }
OpRegistry* OpRegistry::Global() { return nullptr; }
Status OpRegistry::ProcessRegistrations() const { return Status::OK(); }
void OpRegistry::DeferRegistrations() {}

std::string AllocatorStats::DebugString() const { return ""; }
int DataTypeSize(DataType) { return 0; }
// void* DeviceMemAllocator::Alloc(unsigned long, unsigned long, unsigned long*){
//   return nullptr;
// }
// void DeviceMemAllocator::Free(void*, unsigned long){}

// Signature identical to the declaration; the readable spellings
// `std::unique_ptr<SubAllocator>` and `const std::string&` name exactly the
// same types as the demangler output (`std::default_delete<SubAllocator>`,
// `std::__cxx11::basic_string<char, ...>`) they replace.
GPUBFCAllocator::GPUBFCAllocator(std::unique_ptr<SubAllocator>, unsigned long,
                                 const std::string&, GPUBFCAllocator::Options const&) {}

Status ValidateGPUMachineManager() { return Status::OK(); }
se::Platform* GPUMachineManager() { return nullptr; }
std::string GpuPlatformName() { return ""; }

// Pretends the SavedModel loaded successfully; `bundle` is left untouched.
Status LoadSavedModel(const SessionOptions& session_options, const RunOptions& run_options,
                      const std::string& export_dir,
                      const std::unordered_set<std::string>& tags,
                      SavedModelBundle* const bundle) {
  return Status::OK();
}

// Status: every instance behaves as OK with an empty message.
errors::Code Status::code() const { return errors::Code::OK; }

const std::string& Status::error_message() const {
  static std::string null = "";  // static so the returned reference stays valid
  return null;
}

void Status::Update(const Status& new_status) {}
void Status::IgnoreError() const {}
bool Status::ok() const { return true; }
Status& Status::operator=(Status const&) { return *this; }
std::string Status::ToString() const { return ""; }

SubAllocator::SubAllocator(
    std::vector<std::function<void(void*, int, unsigned long)>> const&,
    std::vector<std::function<void(void*, int, unsigned long)>> const&) {}
void SubAllocator::VisitAlloc(void*, int, unsigned long) {}
void SubAllocator::VisitFree(void*, int, unsigned long) {}

// TensorShape: behaves as a rank-0 shape with zero elements.
void TensorShape::AddDim(long) {}
int TensorShape::dims() const { return 0; }
int64_t TensorShape::dim_size(int) const { return 0; }
int64_t TensorShape::num_elements() const { return 0; }
TensorShape::TensorShape(std::initializer_list<long>) {}

// Tensor: empty, zero-byte tensor.
std::string Tensor::DebugString(int) const { return ""; }
Tensor::~Tensor() {}
Tensor::Tensor(Allocator*, tensorflow::DataType, TensorShape const&) {}
Tensor::Tensor(Tensor const&) {}
Tensor::Tensor(DataType, TensorShape const&, TensorBuffer*) {}
size_t Tensor::TotalBytes() const { return 0; }

}  // namespace tensorflow
}  // namespace tensorflow_cpy
#pragma GCC diagnostic pop