Skip to content

Commit

Permalink
Provide part of the dummy implementation
Browse files Browse the repository at this point in the history
  • Loading branch information
chaoqing committed Dec 10, 2024
1 parent 52be440 commit 2e0c4b4
Show file tree
Hide file tree
Showing 10 changed files with 168 additions and 58 deletions.
1 change: 1 addition & 0 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ jobs:
run: |
poetry config virtualenvs.in-project true
poetry install
make protobuf-install
- name: Run style checks
run: |
Expand Down
16 changes: 11 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ example-tensorflow: build-sample example-graph
example: example-pycxpress example-tensorflow

build: build-sample build-dist
rebuild: build-remove build
rebuild: cleanup source-all build
.NOTPARALLEL: rebuild

#*****************************************#
Expand Down Expand Up @@ -265,11 +265,17 @@ build-dist:
$(PYTHON) -m build --outdir dist/

.PHONY: build-remove
build-remove:
rm -rf $(REPO_DIR)/dist/
rm -rf $(REPO_DIR)/build/
# Clean the CMake build trees (sample, tests, doc) in place without
# deleting the configured build directories themselves.
# NOTE(review): `cleanup` lists `build-remove` as a prerequisite elsewhere in
# this Makefile, so depending on `cleanup` here formed a circular dependency
# that GNU make would drop with a warning ("Circular ... dependency dropped").
# `build-remove` therefore stands alone; callers wanting both should invoke
# `cleanup`, which already chains to this target.
build-remove:
	test ! -d build/sample || cmake --build build/sample --target clean
	test ! -d build/tests || cmake --build build/tests --target clean
	test ! -d build/doc || cmake --build build/doc --target clean

.PHONY: cleanup
cleanup: pycache-remove dsstore-remove mypycache-remove ipynbcheckpoints-remove pytestcache-remove
cleanup: pycache-remove dsstore-remove mypycache-remove ipynbcheckpoints-remove pytestcache-remove build-remove

.PHONY: distclean
distclean: cleanup
rm -rf $(REPO_DIR)/dist/
rm -rf $(REPO_DIR)/build/

FORCE:
6 changes: 2 additions & 4 deletions src/TensorflowCpy/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ else()
if(USE_LIBTENSORFLOW_CC)
find_package(TensorFlow REQUIRED)
else()
add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/proto")
add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/proto")
add_compile_definitions(ENABLE_TENSORFLOW_CPY=1)
endif()
endif()
Expand All @@ -31,9 +31,7 @@ endif()
# Note: globbing sources is considered bad practice as CMake's generators may not detect new files
# automatically. Keep that in mind when changing files, or explicitly mention them here.
file(GLOB_RECURSE headers CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/include/**/*.h")
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/source/*.cpp"
)
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/source/*.cpp")

# ---- Create library ----
# Note: for header-only libraries change all PUBLIC flags to INTERFACE and create an interface
Expand Down
1 change: 1 addition & 0 deletions src/TensorflowCpy/include/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
tensorflow
52 changes: 23 additions & 29 deletions src/TensorflowCpy/include/tensorflow_cpy/tensorflow.h
Original file line number Diff line number Diff line change
@@ -1,41 +1,35 @@
#pragma once

#include "./tensorflow/stream_executor/device_memory.h"
#include "./tensorflow/stream_executor/stream_executor_pimpl.h"
#include "./tensorflow/stream_executor/platform.h"

#include "./tensorflow/cc/saved_model/loader.h"
#include "./tensorflow/cc/saved_model/tag_constants.h"

#include "./tensorflow/core/util/stream_executor_util.h"

#include "./tensorflow/core/platform/logging.h"
#include "./tensorflow/core/platform/statusor.h"
#include "./tensorflow/core/platform/refcount.h"
#include "./tensorflow/core/platform/types.h"
#include "./tensorflow/core/platform/macros.h"
#include "./tensorflow/core/platform/status.h"
#include "./tensorflow/core/platform/env.h"

#include "./tensorflow/core/public/session_options.h"
#include "./tensorflow/core/public/session.h"

#include "./tensorflow/core/common_runtime/device_mgr.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_init.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "./tensorflow/core/common_runtime/device/device_id.h"
#include "./tensorflow/core/common_runtime/device/device_id_utils.h"
#include "./tensorflow/core/common_runtime/device/device_mem_allocator.h"
#include "./tensorflow/core/common_runtime/device/device_id.h"

#include "./tensorflow/core/common_runtime/device_mgr.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "./tensorflow/core/common_runtime/gpu/gpu_init.h"
#include "./tensorflow/core/framework/allocator.h"
#include "./tensorflow/core/framework/types.h"
#include "./tensorflow/core/framework/tensor_shape.h"
#include "./tensorflow/core/framework/tensor.h"
#include "./tensorflow/core/framework/device.h"
#include "./tensorflow/core/framework/op.h"
#include "./tensorflow/core/framework/tensor.h"
#include "./tensorflow/core/framework/tensor_shape.h"
#include "./tensorflow/core/framework/types.h"
#include "./tensorflow/core/platform/env.h"
#include "./tensorflow/core/platform/logging.h"
#include "./tensorflow/core/platform/macros.h"
#include "./tensorflow/core/platform/refcount.h"
#include "./tensorflow/core/platform/status.h"
#include "./tensorflow/core/platform/statusor.h"
#include "./tensorflow/core/platform/types.h"
#include "./tensorflow/core/public/session.h"
#include "./tensorflow/core/public/session_options.h"
#include "./tensorflow/core/util/stream_executor_util.h"
#include "./tensorflow/stream_executor/device_memory.h"
#include "./tensorflow/stream_executor/platform.h"
#include "./tensorflow/stream_executor/stream_executor_pimpl.h"

namespace tensorflow_cpy {
namespace tensorflow {
using namespace ::tensorflow;
} // namespace tensorflow
namespace tensorflow {
using namespace ::tensorflow;
} // namespace tensorflow
} // namespace tensorflow_cpy
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,10 @@ class DeviceDescription {
// value will be provided.
static const char *kUndefinedString;

// Returns a process-wide placeholder DeviceDescription for the dummy
// (stub) TensorFlow implementation; no real device is ever queried.
// Being a member, it may call the private default constructor below.
// NOTE(review): returns a mutable reference to a function-local static —
// presumably fine for the single-threaded stub use case, but confirm no
// caller mutates it or relies on thread-safe initialization semantics.
static DeviceDescription& CreateDummy(){
static DeviceDescription dummy;
return dummy;
}
private:
DeviceDescription();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
#define TENSORFLOW_CPY_STREAM_EXECUTOR_STREAM_EXECUTOR_PIMPL_H_


#include "../core/platform/statusor.h"
#include "../stream_executor/device_memory.h"
#include "../stream_executor/platform.h"

Expand Down Expand Up @@ -53,7 +52,9 @@ class StreamExecutor {
// elements.
template <typename T>
DeviceMemory<T> AllocateArray(uint64_t element_count,
int64_t memory_space = 0);
int64_t memory_space = 0){
return DeviceMemory<T>{};
}

// Convenience wrapper that allocates space for a single element of type T in
// device memory.
Expand Down
2 changes: 1 addition & 1 deletion src/TensorflowCpy/proto/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ file(GLOB_RECURSE protos CONFIGURE_DEPENDS
)

add_library(proto-objects OBJECT ${protos})
set(PROTO_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/protobuf-generated")
set(PROTO_BINARY_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../include")
make_directory(${PROTO_BINARY_DIR})

protobuf_generate(
Expand Down
35 changes: 18 additions & 17 deletions src/TensorflowCpy/source/framework.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,27 +31,28 @@ void wait_for_debugger_attach() {


#if ENABLE_TENSORFLOW_CPY
#include <tensorflow_cpy/tensorflow.h>
# include <tensorflow_cpy/tensorflow.h>

namespace tf = tensorflow_cpy::tensorflow;
namespace se = tensorflow_cpy::stream_executor;
#else
#include <tensorflow/cc/saved_model/loader.h>
#include <tensorflow/cc/saved_model/tag_constants.h>
#include <tensorflow/core/common_runtime/device/device_id.h>
#include <tensorflow/core/common_runtime/device/device_id_utils.h>
#include <tensorflow/core/common_runtime/device/device_mem_allocator.h>
#include <tensorflow/core/common_runtime/device_mgr.h>
#include <tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h>
#include <tensorflow/core/common_runtime/gpu/gpu_init.h>
#include "tensorflow/core/platform/types.h"
#include <tensorflow/core/framework/tensor.h>
#include <tensorflow/core/framework/device.h>
#include <tensorflow/core/framework/op.h>
#include <tensorflow/core/framework/types.h>
#include <tensorflow/core/platform/env.h>
#include <tensorflow/core/public/session.h>
#include <tensorflow/core/util/stream_executor_util.h>
# include <tensorflow/cc/saved_model/loader.h>
# include <tensorflow/cc/saved_model/tag_constants.h>
# include <tensorflow/core/common_runtime/device/device_id.h>
# include <tensorflow/core/common_runtime/device/device_id_utils.h>
# include <tensorflow/core/common_runtime/device/device_mem_allocator.h>
# include <tensorflow/core/common_runtime/device_mgr.h>
# include <tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h>
# include <tensorflow/core/common_runtime/gpu/gpu_init.h>
# include <tensorflow/core/framework/device.h>
# include <tensorflow/core/framework/op.h>
# include <tensorflow/core/framework/tensor.h>
# include <tensorflow/core/framework/types.h>
# include <tensorflow/core/platform/env.h>
# include <tensorflow/core/public/session.h>
# include <tensorflow/core/util/stream_executor_util.h>

# include "tensorflow/core/platform/types.h"

namespace tf = tensorflow;
namespace se = stream_executor;
Expand Down
104 changes: 104 additions & 0 deletions src/TensorflowCpy/source/tensorflow.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
// Dummy (link-time stub) implementations of the TensorFlow / StreamExecutor
// symbols declared in <tensorflow_cpy/tensorflow.h>. Every function is a
// no-op returning a neutral value (true / OK / nullptr / 0 / ""), so that
// code built with ENABLE_TENSORFLOW_CPY can compile and link without a real
// libtensorflow. None of these perform actual TensorFlow work.
// NOTE(review): signatures use demangled spellings (`long`, `unsigned long`,
// `std::__cxx11::basic_string<...>`) — presumably copied from linker errors;
// confirm they stay in sync with the header declarations on all target ABIs.
#include <tensorflow_cpy/tensorflow.h>

// Stubs intentionally ignore all of their parameters.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"

namespace tensorflow_cpy {
namespace stream_executor {
// StreamExecutor stubs: report success for every memory operation while
// allocating nothing and transferring nothing.
bool StreamExecutor::DeviceMemoryUsage(long*, long*) const { return true; }
const DeviceDescription& StreamExecutor::GetDeviceDescription() const {
  return DeviceDescription::CreateDummy();
}
void StreamExecutor::Deallocate(DeviceMemoryBase*) {}
port::Status StreamExecutor::SynchronousMemcpyD2H(DeviceMemoryBase const&, long, void*) {
  return port::Status::OK();
}
port::Status StreamExecutor::SynchronousMemcpyH2D(void const*, long, DeviceMemoryBase*) {
  return port::Status::OK();
}
port::Status StreamExecutor::SynchronousMemSet(DeviceMemoryBase*, int, unsigned long) {
  return port::Status::OK();
}
// Unified-memory stubs: allocation always "fails" with nullptr; callers are
// expected to tolerate that in the dummy build.
void* StreamExecutor::UnifiedMemoryAllocate(unsigned long) { return nullptr; }
void StreamExecutor::UnifiedMemoryDeallocate(void*) {}
}; // namespace stream_executor
namespace tensorflow {
namespace se = stream_executor;

SessionOptions::SessionOptions() {}
// Commented-out stubs below are kept as a record of symbols that may still
// need dummy definitions — TODO confirm whether they are required to link.
namespace internal {
// LogMessageFatal::LogMessageFatal(char const*, int) {}
// LogMessageFatal::~LogMessageFatal() {}
};

namespace core {
// RefCountDeleter::operator()(RefCounted const*) const{}
// RefCounted::RefCounted() {}
// RefCounted::Unref() const {}
}


// Global singletons are stubbed as nullptr — callers must not dereference
// them in the dummy build. NOTE(review): confirm no code path does.
Env* Env::Default() { return nullptr; }
OpRegistry* OpRegistry::Global() { return nullptr; }
Status OpRegistry::ProcessRegistrations() const { return Status::OK(); }
void OpRegistry::DeferRegistrations() {}

std::string AllocatorStats::DebugString() const { return ""; }
int DataTypeSize(DataType) { return 0; }
// void* DeviceMemAllocator::Alloc(unsigned long, unsigned long, unsigned long*){
//     return nullptr;
// }
// void DeviceMemAllocator::Free(void*, unsigned long){}
// DeviceMemAllocator::DeviceMemAllocator(se::StreamExecutor*, PlatformDeviceId, bool,
// std::vector<std::function<void (void*, int, unsigned long)>,
// std::allocator<std::function<void (void*, int, unsigned long)> > > const&,
// std::vector<std::function<void (void*, int, unsigned long)>,
// std::allocator<std::function<void (void*, int, unsigned long)> > > const&){}
GPUBFCAllocator::GPUBFCAllocator(
    std::unique_ptr<SubAllocator, std::default_delete<SubAllocator> >, unsigned long,
    std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&,
    GPUBFCAllocator::Options const&) {}
Status ValidateGPUMachineManager() { return Status::OK(); }
se::Platform* GPUMachineManager() { return nullptr; }
std::string GpuPlatformName() { return ""; }
// SavedModel loading is a no-op: reports OK without touching `bundle`.
Status LoadSavedModel(const SessionOptions& session_options, const RunOptions& run_options,
                      const std::string& export_dir,
                      const std::unordered_set<std::string>& tags,
                      SavedModelBundle* const bundle) {
  return Status::OK();
}
// Status stubs: every Status is permanently OK with an empty message.
errors::Code Status::code() const { return errors::Code::OK; }

const std::string& Status::error_message() const {
  static std::string null = "";
  return null;
}

void Status::Update(const Status& new_status) {}
void Status::IgnoreError() const {}
bool Status::ok() const { return true; }
Status& Status::operator=(Status const&) { return *this; }
std::string Status::ToString() const { return ""; }
SubAllocator::SubAllocator(
    std::vector<std::function<void(void*, int, unsigned long)>,
                std::allocator<std::function<void(void*, int, unsigned long)> > > const&,
    std::vector<std::function<void(void*, int, unsigned long)>,
                std::allocator<std::function<void(void*, int, unsigned long)> > > const&) {}
void SubAllocator::VisitAlloc(void*, int, unsigned long) {}
void SubAllocator::VisitFree(void*, int, unsigned long) {}

// TensorShape stubs: shape is always empty (0 dims, 0 elements).
void TensorShape::AddDim(long) {}
int TensorShape::dims() const { return 0; }
int64_t TensorShape::dim_size(int) const { return 0; }
int64_t TensorShape::num_elements() const { return 0; }
TensorShape::TensorShape(std::initializer_list<long>) {}

// Tensor stubs: constructors allocate nothing; size/debug report empty.
std::string Tensor::DebugString(int) const { return ""; }
Tensor::~Tensor() {}
Tensor::Tensor(Allocator*, tensorflow::DataType, TensorShape const&) {}
Tensor::Tensor(Tensor const&) {}
Tensor::Tensor(DataType, TensorShape const&, TensorBuffer*) {}
size_t Tensor::TotalBytes() const { return 0; }
}; // namespace tensorflow
}; // namespace tensorflow_cpy
#pragma GCC diagnostic pop

0 comments on commit 2e0c4b4

Please sign in to comment.