Remove backend #706

Open · wants to merge 12 commits into base: master
5 changes: 5 additions & 0 deletions bindings/cpp/session.cpp
@@ -1896,6 +1896,11 @@ async_backend_control_result session::disable_backend(const address &addr, uint3
return update_backend_status(backend_status_params(*this, addr, backend_id, DNET_BACKEND_DISABLE));
}

async_backend_control_result session::remove_backend(const address &addr, uint32_t backend_id)
{
return update_backend_status(backend_status_params(*this, addr, backend_id, DNET_BACKEND_REMOVE));
}

async_backend_control_result session::start_defrag(const address &addr, uint32_t backend_id)
{
backend_status_params params(*this, addr, backend_id, DNET_BACKEND_START_DEFRAG);
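For orientation, a minimal client-side sketch of how the new call can be driven. It is not part of the patch: it assumes an already connected `ioremap::elliptics::session` named `sess`, and the host name, port and backend id are placeholders.

```cpp
#include <elliptics/session.hpp>
#include <sys/socket.h> // AF_INET

using namespace ioremap::elliptics;

// Sketch: ask the node that hosts backend 0 to stop, clean up and forget it.
void remove_backend_example(session &sess) {
	// Address of the node owning the backend: (host, port, family),
	// the same triple the Python binding takes apart later in this diff.
	address node_addr("storage-1.example.com", 1025, AF_INET);

	async_backend_control_result result = sess.remove_backend(node_addr, 0);

	// Block until the node replies; on success the reply carries the new
	// status of the backend, as in the Python docstring example below.
	result.wait();
}
```

The call travels the same `update_backend_status()` / `backend_status_params` path as `disable_backend()` and `start_defrag()` above, just with the new `DNET_BACKEND_REMOVE` command.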
1 change: 1 addition & 0 deletions bindings/python/dnet_client
@@ -129,6 +129,7 @@ def process_backend():
actions = {
'enable': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.enable_backend),
'disable': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.disable_backend),
'remove': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.remove_backend),
'defrag': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.start_defrag),
'compact': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.start_compact),
'stop_defrag': lambda x, y, z: process_backend_control(x, y, z, elliptics.Session.stop_defrag),
13 changes: 13 additions & 0 deletions bindings/python/elliptics_session.cpp
@@ -445,6 +445,12 @@ class elliptics_session: public session, public bp::wrapper<session> {
return create_result(std::move(session::disable_backend(address(host, port, family), backend_id)));
}

python_backend_status_result remove_backend(const std::string &host, int port, int family, uint64_t backend_id) {
return create_result(
session::remove_backend(address(host, port, family), backend_id)
);
}

python_backend_status_result start_defrag(const std::string &host, int port, int family, uint32_t backend_id) {
return create_result(std::move(session::start_defrag(address(host, port, family), backend_id)));
}
@@ -1431,6 +1437,13 @@ void init_elliptics_session() {
" Returns AsyncResult which provides new status of the backend\n\n"
" new_status = session.disable_backend(elliptics.Address.from_host_port_family(host='host.com', port=1025, family=AF_INET), 0).get()[0].backends[0]")

.def("remove_backend", &elliptics_session::remove_backend,
(bp::arg("host"), bp::arg("port"), bp::arg("family"), bp::arg("backend_id")),
"remove_backend(host, port, family, backend_id)\n"
" Removes backend @backend_id at node addressed by @host, @port, @family\n"
" Returns AsyncResult which provides new status of the backend\n\n"
" new_status = session.remove_backend(elliptics.Address.from_host_port_family(host='host.com', port=1025, family=AF_INET), 0).get()[0].backends[0]")

.def("start_defrag", &elliptics_session::start_defrag,
(bp::arg("host"), bp::arg("port"), bp::arg("family"), bp::arg("backend_id")),
"start_defrag(host, port, family, backend_id)\n"
10 changes: 10 additions & 0 deletions bindings/python/src/session.py
@@ -197,6 +197,16 @@ def disable_backend(self, address, backend_id):
family=address.family,
backend_id=backend_id)

def remove_backend(self, address, backend_id):
"""
Removes backend @backend_id on @address.
Return elliptics.AsyncResult that provides new status of backend.
"""
return super(Session, self).remove_backend(host=address.host,
port=address.port,
family=address.family,
backend_id=backend_id)

def start_defrag(self, address, backend_id):
"""
Starts defragmentation of backend @backend_id on @address.
38 changes: 24 additions & 14 deletions example/config.cpp
@@ -311,10 +311,28 @@ void parse_options(config_data *data, const config &options)
}
}

std::shared_ptr<dnet_backend_info> dnet_parse_backend(config_data *data, uint32_t backend_id, const config &backend)
{
auto info = std::make_shared<dnet_backend_info>(data->logger, backend_id);

info->enable_at_start = backend.at<bool>("enable", true);
info->read_only_at_start = backend.at<bool>("read_only", false);
info->state = DNET_BACKEND_DISABLED;
info->history = backend.at("history", std::string());

if (info->enable_at_start) {
// It's parsed to check configuration at start
// It will be reparsed again at backend's initialization anyway
info->parse(data, backend);
}

return info;
}

void parse_backends(config_data *data, const config &backends)
{
std::set<uint32_t> backends_ids;
auto &backends_info = data->backends->backends;
auto config_backends = data->backends;

for (size_t index = 0; index < backends.size(); ++index) {
const config backend = backends.at(index);
@@ -327,19 +345,11 @@ void parse_backends(config_data *data, const config &backends)
<< " duplicates one of previous backend_id";
}

- while (backend_id + 1 > backends_info.size())
- backends_info.emplace_back(data->logger, backends_info.size());
-
- dnet_backend_info &info = backends_info[backend_id];
- info.enable_at_start = backend.at<bool>("enable", true);
- info.read_only_at_start = backend.at<bool>("read_only", false);
- info.state = DNET_BACKEND_DISABLED;
- info.history = backend.at("history", std::string());
-
- if (info.enable_at_start) {
- // It's parsed to check configuration at start
- // It will be reparsed again at backend's initialization anyway
- info.parse(data, backend);
+ if (!config_backends->get_backend(backend_id)) {
+ auto info = dnet_parse_backend(data, backend_id, backend);
+ if (info) {
+ config_backends->add_backend(info);
+ }
}
}
}
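Read as a standalone step, the new loop body boils down to "parse once, register once". A condensed sketch of that flow, assuming the headers already included by example/config.cpp; the helper name `register_backend` is hypothetical, everything else comes from this patch:

```cpp
// Hypothetical helper: register one backend section with the manager,
// skipping ids that are already known.
static void register_backend(config_data *data, uint32_t backend_id, const config &backend) {
	auto config_backends = data->backends;

	// Already registered (e.g. a duplicate backend_id in the config): nothing to do.
	if (config_backends->get_backend(backend_id))
		return;

	// dnet_parse_backend() reads "enable", "read_only" and "history" and, when the
	// backend is enabled at start, validates its full configuration up front.
	auto info = dnet_parse_backend(data, backend_id, backend);
	if (info)
		config_backends->add_backend(info);
}
```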
4 changes: 3 additions & 1 deletion example/config.hpp
@@ -354,7 +354,7 @@ struct config_data : public dnet_config_data
std::mutex parser_mutex;
std::shared_ptr<config_parser> parser;
dnet_time config_timestamp;
- dnet_backend_info_list backends_guard;
+ dnet_backend_info_manager backends_guard;
std::string logger_value;
ioremap::elliptics::logger_base logger_base;
ioremap::elliptics::logger logger;
@@ -363,6 +363,8 @@
std::unique_ptr<monitor::monitor_config> monitor_config;
};

std::shared_ptr<dnet_backend_info> dnet_parse_backend(config_data *data, uint32_t backend_id, const config &backend);

} } } // namespace ioremap::elliptics::config

#endif // CONFIG_HPP
3 changes: 2 additions & 1 deletion include/elliptics/packet.h
@@ -111,7 +111,8 @@ enum dnet_backend_command {
DNET_BACKEND_READ_ONLY,
DNET_BACKEND_WRITEABLE,
DNET_BACKEND_CTL, // change internal parameters like delay
- DNET_BACKEND_STOP_DEFRAG
+ DNET_BACKEND_STOP_DEFRAG,
+ DNET_BACKEND_REMOVE, // stop, cleanup and forget backend
};

enum dnet_backend_state {
58 changes: 53 additions & 5 deletions include/elliptics/session.hpp
@@ -76,8 +76,8 @@ void none(const error_info &error, const std::vector<dnet_cmd> &statuses);
/*!
* This handler allows to remove couple of replicas in case of bad writing
*
- * If you write to 3 groups and at least 2 succesfull writings are mandotary and
- * in case of fail all succesffully written entries must be removed the
+ * If you write to 3 groups and at least 2 successful writings are mandatory and
+ * in case of fail all successfully written entries must be removed the
* following code may be used:
*
* ```cpp
@@ -482,7 +482,7 @@ class session

/*!
* Reads data by \a id and passes it through \a converter. If converter returns the same data
- * it's threated as data is already up-to-date, othwerwise low-level write-cas with proper
+ * it's threated as data is already up-to-date, otherwise low-level write-cas with proper
* checksum and \a remote_offset is invoked.
*
* If server returns -EBADFD data is read and processed again.
@@ -584,7 +584,7 @@
* Lookups information for key \a id, picks lookup_result_enties by following rules:
* 1. If there are quorum lookup_result_enties with the same timestamp, they are the final result
* 2. Otherwise the final result is lookup_result_enties with the greatest timestamp
- * This method is a wrapper over parallel_lookup and usefull in case of you need to find
+ * This method is a wrapper over parallel_lookup and useful in case of you need to find
* quorum identical replicas
*
* Returns async_lookup_result.
@@ -643,16 +643,64 @@
*/
async_node_status_result request_node_status(const address &addr);

/*!
* Enables backend with @backend_id at node @addr.
*/
async_backend_control_result enable_backend(const address &addr, uint32_t backend_id);

/*!
* Disables backend with @backend_id at node @addr.
*/
async_backend_control_result disable_backend(const address &addr, uint32_t backend_id);

/*!
* Removes backend with @backend_id at node @addr.
* The backend will be stopped and uninitialized.
*/
async_backend_control_result remove_backend(const address &addr, uint32_t backend_id);

/*!
* Starts defragmentation at backend with @backend_id at node @addr.
*/
async_backend_control_result start_defrag(const address &addr, uint32_t backend_id);

/*!
* Starts compact (lightweight defragmentation) at backend with @backend_id at node @addr.
*/
async_backend_control_result start_compact(const address &addr, uint32_t backend_id);

/*!
* Stops defragmentation at backend with @backend_id at node @addr.
*/
async_backend_control_result stop_defrag(const address &addr, uint32_t backend_id);

/*!
* Updates ids which backend with @backend_id at node @addr serves.
*/
async_backend_control_result set_backend_ids(const address &addr, uint32_t backend_id,
const std::vector<dnet_raw_id> &ids);

/*!
* Makes backend with @backend_id at node @addr readonly.
* Backend in readonly mode fails all requests which modify data.
*/
async_backend_control_result make_readonly(const address &addr, uint32_t backend_id);

/*!
* Makes backend with @backend_id at node @addr writeable.
* Turns off read-only mode.
*/
async_backend_control_result make_writable(const address &addr, uint32_t backend_id);

/*!
* Sets delay in milliseconds to backend with @backend_id at node @addr.
* Backend with a delay will sleep @delay milliseconds before executing any request.
*/
async_backend_control_result set_delay(const address &addr, uint32_t backend_id, uint32_t delay);

/*!
* Requests status of all backends at node @addr.
*/
async_backend_status_result request_backends_status(const address &addr);

/*!
@@ -818,7 +866,7 @@
* \brief Set \a indexes for object \a id.
*
* It removes object from all indexes which are not in the list \a indexes.
- * All data in existen indexes are replaced by so from \a indexes.
+ * All data in existing indexes are replaced by so from \a indexes.
*
* Returns async_set_indexes_result.
*/
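Taken together, the backend controls documented in this hunk compose into a natural decommissioning flow. A hedged sketch, assuming a connected `session` named `sess`; the host, port and backend id are placeholders and error handling is omitted:

```cpp
#include <elliptics/session.hpp>
#include <sys/socket.h> // AF_INET

using namespace ioremap::elliptics;

// Sketch: drain and remove one backend, then re-check the node.
void decommission_backend(session &sess) {
	const address node("storage-1.example.com", 1025, AF_INET);
	const uint32_t backend_id = 5;

	// 1. Reject further writes on the backend.
	sess.make_readonly(node, backend_id).wait();

	// 2. Stop, clean up and forget the backend (DNET_BACKEND_REMOVE).
	sess.remove_backend(node, backend_id).wait();

	// 3. Ask the node which backends it still serves.
	async_backend_status_result status = sess.request_backends_status(node);
	status.wait();
}
```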