Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Notify the cloud about keepalive interval changes #1908

Draft
wants to merge 5 commits into
base: develop
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion communication/inc/communication_dynalib.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ DYNALIB_FN(BASE_IDX + 2, communication, extract_public_ec_key, int(uint8_t*, siz
#define BASE_IDX2 (BASE_IDX + 1)
#endif

DYNALIB_FN(BASE_IDX2 + 0, communication, spark_protocol_set_connection_property, int(ProtocolFacade*, unsigned, unsigned, particle::protocol::connection_properties_t*, void*))
DYNALIB_FN(BASE_IDX2 + 0, communication, spark_protocol_set_connection_property, int(ProtocolFacade*, unsigned, unsigned, const void*, void*))
DYNALIB_FN(BASE_IDX2 + 1, communication, spark_protocol_command, int(ProtocolFacade* protocol, ProtocolCommands::Enum cmd, uint32_t data, void* reserved))
DYNALIB_FN(BASE_IDX2 + 2, communication, spark_protocol_time_request_pending, bool(ProtocolFacade*, void*))
DYNALIB_FN(BASE_IDX2 + 3, communication, spark_protocol_time_last_synced, system_tick_t(ProtocolFacade*, time_t*, void*))
Expand All @@ -84,6 +84,7 @@ DYNALIB_FN(BASE_IDX3 + 0, communication, spark_protocol_get_describe_data, int(P
DYNALIB_FN(BASE_IDX3 + 1, communication, spark_protocol_post_description, int(ProtocolFacade*, int, void*))
DYNALIB_FN(BASE_IDX3 + 2, communication, spark_protocol_to_system_error, int(int))
DYNALIB_FN(BASE_IDX3 + 3, communication, spark_protocol_get_status, int(ProtocolFacade*, protocol_status*, void*))
DYNALIB_FN(BASE_IDX3 + 4, communication, spark_protocol_get_connection_property, int(ProtocolFacade*, unsigned, unsigned*, void*, void*))

DYNALIB_END(communication)

Expand Down
18 changes: 14 additions & 4 deletions communication/inc/protocol.h
Original file line number Diff line number Diff line change
Expand Up @@ -317,14 +317,24 @@ class Protocol
pinger.init(interval, timeout);
}

void set_keepalive(system_tick_t interval, keepalive_source_t source)
bool set_keepalive(system_tick_t interval, keepalive_source_t source)
{
pinger.set_interval(interval, source);
return pinger.set_interval(interval, source);
}

void set_fast_ota(unsigned data)
system_tick_t get_keepalive(keepalive_source_t* source = nullptr) const
{
chunkedTransfer.set_fast_ota(data);
return pinger.get_interval(source);
}

/**
 * Enables or disables fast OTA transfers.
 *
 * @param enabled `true` to enable fast OTA, `false` to disable it.
 */
void set_fast_ota(bool enabled)
{
    // Delegate to the chunked transfer layer, which owns the OTA state
    chunkedTransfer.set_fast_ota(enabled);
}

/**
 * Returns whether fast OTA transfers are enabled.
 *
 * @return `true` if fast OTA is enabled, `false` otherwise.
 */
bool get_fast_ota() const
{
    return chunkedTransfer.get_fast_ota();
}

void set_handlers(CommunicationsHandlers& handlers)
Expand Down
2 changes: 2 additions & 0 deletions communication/inc/protocol_defs.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ enum ProtocolError
/* 24 */ IO_ERROR_LIGHTSSL_HANDSHAKE_NONCE,
/* 25 */ IO_ERROR_LIGHTSSL_HANDSHAKE_RECV_KEY,
/* 26 */ NOT_IMPLEMENTED,
/* 27 */ INVALID_PARAM,
/* 28 */ NOT_CHANGED,

/*
* NOTE: when adding more ProtocolError codes, be sure to update toSystemError() in protocol_defs.cpp
Expand Down
3 changes: 2 additions & 1 deletion communication/inc/spark_protocol_functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,8 @@ void spark_protocol_set_product_id(ProtocolFacade* protocol, product_id_t produc
void spark_protocol_set_product_firmware_version(ProtocolFacade* protocol, product_firmware_version_t product_firmware_version, unsigned int param=0, void* reserved = NULL);
void spark_protocol_get_product_details(ProtocolFacade* protocol, product_details_t* product_details, void* reserved=NULL);

int spark_protocol_set_connection_property(ProtocolFacade* protocol, unsigned property_id, unsigned data, particle::protocol::connection_properties_t* conn_prop, void* reserved);
int spark_protocol_set_connection_property(ProtocolFacade* protocol, unsigned property_id, unsigned value, const void* data, void* reserved);
int spark_protocol_get_connection_property(ProtocolFacade* protocol, unsigned property_id, unsigned* value, void* data, void* reserved);
bool spark_protocol_time_request_pending(ProtocolFacade* protocol, void* reserved=NULL);
system_tick_t spark_protocol_time_last_synced(ProtocolFacade* protocol, time_t* tm, void* reserved=NULL);

Expand Down
9 changes: 7 additions & 2 deletions communication/src/chunked_transfer.h
Original file line number Diff line number Diff line change
Expand Up @@ -144,12 +144,17 @@ class ChunkedTransfer

ProtocolError idle(MessageChannel& channel);

void set_fast_ota(unsigned data)
void set_fast_ota(bool enabled)
{
fast_ota_value = (data > 0) ? true : false;
fast_ota_value = enabled;
fast_ota_override = true;
}

/**
 * Returns whether fast OTA transfers are enabled.
 *
 * @return Current fast OTA setting.
 */
bool get_fast_ota() const
{
    return fast_ota_value;
}

bool is_updating()
{
return updating;
Expand Down
31 changes: 29 additions & 2 deletions communication/src/ping.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,15 @@ class Pinger
this->keepalive_source = KeepAliveSource::SYSTEM;
}

void set_interval(system_tick_t interval, keepalive_source_t source)
/**
* Sets the ping interval.
*
* @param interval New interval in milliseconds.
* @param source Source of the interval change. The interval set by the user application takes
* precedence over the interval set by the system.
* @return `true` if the current interval has been changed or `false` otherwise.
*/
bool set_interval(system_tick_t interval, keepalive_source_t source)
{
/**
* LAST CURRENT UPDATE?
Expand All @@ -34,11 +42,30 @@ class Pinger
* USER SYS NO
* USER USER YES
*/
// TODO: It feels that this logic should have been implemented in the system layer
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree with this. The comms layer just provides the timeout and the system layer manages the multiple stakeholders that may choose to change it. Similarly to how we manage the LED.

if ( !(this->keepalive_source == KeepAliveSource::USER && source == KeepAliveSource::SYSTEM) )
{
this->ping_interval = interval;
this->keepalive_source = source;
if (this->ping_interval != interval) {
this->ping_interval = interval;
return true;
}
}
return false;
}

/**
 * Returns the current ping interval.
 *
 * @param[out] source If not null, set to the source of the last interval change.
 * @return Interval in milliseconds.
 */
system_tick_t get_interval(keepalive_source_t* source = nullptr) const
{
    if (source != nullptr) {
        *source = keepalive_source;
    }
    return ping_interval;
}

void reset()
Expand Down
4 changes: 4 additions & 0 deletions communication/src/protocol_defs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,10 @@ system_error_t particle::protocol::toSystemError(ProtocolError error) {
return SYSTEM_ERROR_TOO_LARGE;
case NOT_IMPLEMENTED:
return SYSTEM_ERROR_NOT_SUPPORTED;
case INVALID_PARAM:
return SYSTEM_ERROR_INVALID_ARGUMENT;
case NOT_CHANGED:
return SYSTEM_ERROR_NOT_CHANGED;
default:
return SYSTEM_ERROR_PROTOCOL; // Generic protocol error
}
Expand Down
72 changes: 60 additions & 12 deletions communication/src/spark_protocol_functions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -190,19 +190,60 @@ void spark_protocol_get_product_details(ProtocolFacade* protocol, product_detail
protocol->get_product_details(*details);
}

int spark_protocol_set_connection_property(ProtocolFacade* protocol, unsigned property_id,
unsigned data, particle::protocol::connection_properties_t* conn_prop, void* reserved)
int spark_protocol_set_connection_property(ProtocolFacade* protocol, unsigned property_id, unsigned value,
const void* data, void* reserved)
{
ASSERT_ON_SYSTEM_THREAD();
if (property_id == particle::protocol::Connection::PING)
{
protocol->set_keepalive(data, conn_prop->keepalive_source);
} else if (property_id == particle::protocol::Connection::FAST_OTA)
{
protocol->set_fast_ota(data);
switch (property_id) {
case particle::protocol::Connection::PING: {
particle::protocol::keepalive_source_t source = particle::protocol::KeepAliveSource::SYSTEM;
if (data) {
const auto d = static_cast<const particle::protocol::connection_properties_t*>(data);
source = d->keepalive_source;
}
const bool changed = protocol->set_keepalive(value, source);
if (!changed) {
return particle::protocol::NOT_CHANGED;
}
return 0;
}
case particle::protocol::Connection::FAST_OTA: {
protocol->set_fast_ota(value);
return 0;
}
default:
return particle::protocol::INVALID_PARAM;
}
}

/**
 * Retrieves a connection property.
 *
 * @param protocol Protocol instance.
 * @param property_id Property ID (see `particle::protocol::Connection`).
 * @param[out] value If not null, set to the property value.
 * @param[out] data Optional property-specific data. For `Connection::PING` this is a pointer to
 *        `connection_properties_t` that receives the source of the last interval change.
 * @param reserved Reserved argument; must be null.
 * @return 0 on success or a `ProtocolError` code on failure.
 */
int spark_protocol_get_connection_property(ProtocolFacade* protocol, unsigned property_id, unsigned* value, void* data,
        void* reserved)
{
    ASSERT_ON_SYSTEM_THREAD();
    switch (property_id) {
    case particle::protocol::Connection::PING: {
        particle::protocol::keepalive_source_t source = particle::protocol::KeepAliveSource::SYSTEM;
        const system_tick_t interval = protocol->get_keepalive(&source);
        if (value) {
            *value = interval;
        }
        if (data) {
            const auto d = static_cast<particle::protocol::connection_properties_t*>(data);
            d->keepalive_source = source;
        }
        return 0;
    }
    case particle::protocol::Connection::FAST_OTA: {
        if (value) {
            *value = protocol->get_fast_ota();
        }
        return 0;
    }
    default:
        // Every case above returns, so no fall-through return is needed here
        return particle::protocol::INVALID_PARAM;
    }
}

int spark_protocol_command(ProtocolFacade* protocol, ProtocolCommands::Enum cmd, uint32_t data, void* reserved)
{
ASSERT_ON_SYSTEM_THREAD();
Expand All @@ -214,6 +255,7 @@ bool spark_protocol_time_request_pending(ProtocolFacade* protocol, void* reserve
(void)reserved;
return protocol->time_request_pending();
}

system_tick_t spark_protocol_time_last_synced(ProtocolFacade* protocol, time_t* tm, void* reserved)
{
(void)reserved;
Expand Down Expand Up @@ -348,10 +390,16 @@ void spark_protocol_get_product_details(CoreProtocol* protocol, product_details_
protocol->get_product_details(*details);
}

int spark_protocol_set_connection_property(CoreProtocol* protocol, unsigned property_id,
unsigned data, particle::protocol::connection_properties_t* conn_prop, void* reserved)
int spark_protocol_set_connection_property(CoreProtocol* protocol, unsigned property_id, unsigned value, const void* data,
void* reserved)
{
return particle::protocol::NOT_IMPLEMENTED;
}

// Connection properties are not supported by the legacy CoreProtocol implementation
int spark_protocol_get_connection_property(CoreProtocol* protocol, unsigned property_id, unsigned* value, void* data,
        void* reserved)
{
    return particle::protocol::NOT_IMPLEMENTED;
}

int spark_protocol_command(CoreProtocol* protocol, ProtocolCommands::Enum cmd, uint32_t data, void* reserved)
Expand Down
1 change: 1 addition & 0 deletions services/inc/system_error.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
(BUSY, "Resource busy", -110), \
(NOT_SUPPORTED, "Not supported", -120), \
(NOT_ALLOWED, "Not allowed", -130), \
(NOT_CHANGED, "Not changed", -131), \
(CANCELLED, "Operation cancelled", -140), \
(ABORTED, "Operation aborted", -150), \
(TIMEOUT, "Timeout error", -160), \
Expand Down
2 changes: 1 addition & 1 deletion system/inc/system_cloud.h
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ bool spark_cloud_flag_auto_connect(void);

ProtocolFacade* system_cloud_protocol_instance(void);

int spark_set_connection_property(unsigned property_id, unsigned data, particle::protocol::connection_properties_t* conn_prop, void* reserved);
int spark_set_connection_property(unsigned property_id, unsigned value, const void* data, void* reserved);

int spark_set_random_seed_from_cloud_handler(void (*handler)(unsigned int), void* reserved);

Expand Down
2 changes: 1 addition & 1 deletion system/inc/system_dynalib_cloud.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ DYNALIB_FN(10, system_cloud, spark_unsubscribe, void(void*))
DYNALIB_FN(11, system_cloud, spark_sync_time, bool(void*))
DYNALIB_FN(12, system_cloud, spark_sync_time_pending, bool(void*))
DYNALIB_FN(13, system_cloud, spark_sync_time_last, system_tick_t(time_t*, void*))
DYNALIB_FN(14, system_cloud, spark_set_connection_property, int(unsigned, unsigned, particle::protocol::connection_properties_t*, void*))
DYNALIB_FN(14, system_cloud, spark_set_connection_property, int(unsigned, unsigned, const void*, void*))
DYNALIB_FN(15, system_cloud, spark_set_random_seed_from_cloud_handler, int(void (*handler)(unsigned int), void*))
DYNALIB_FN(16, system_cloud, spark_publish_vitals, int(system_tick_t, void*))

Expand Down
18 changes: 15 additions & 3 deletions system/src/system_cloud.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ extern void (*random_seed_from_cloud_handler)(unsigned int);
namespace
{

using namespace particle;
using namespace particle::system;

#if PLATFORM_THREADING
Expand Down Expand Up @@ -245,10 +246,21 @@ String spark_deviceID(void)
return bytes2hex(id, len);
}

int spark_set_connection_property(unsigned property_id, unsigned data, particle::protocol::connection_properties_t* conn_prop, void* reserved)
int spark_set_connection_property(unsigned property_id, unsigned value, const void* data, void* reserved)
{
SYSTEM_THREAD_CONTEXT_SYNC(spark_set_connection_property(property_id, data, conn_prop, reserved));
return spark_protocol_set_connection_property(sp, property_id, data, conn_prop, reserved);
SYSTEM_THREAD_CONTEXT_SYNC(spark_set_connection_property(property_id, value, data, reserved));
const auto r = spark_protocol_set_connection_property(sp, property_id, value, data, reserved);
if (r != 0) {
return spark_protocol_to_system_error(r);
}
// Publish the new keepalive interval if the connection to the cloud is established or the
// system handshake is in progress
if (property_id == protocol::Connection::PING && (spark_cloud_flag_connected() || SPARK_CLOUD_HANDSHAKE_PENDING)) {
if (!publishKeepaliveInterval(value)) {
LOG(WARN, "Unable to publish the keepalive interval");
}
}
return 0;
}

int spark_set_random_seed_from_cloud_handler(void (*handler)(unsigned int), void* reserved)
Expand Down
29 changes: 29 additions & 0 deletions system/src/system_cloud_internal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@

using particle::CloudDiagnostics;
using particle::publishEvent;
using particle::publishKeepaliveInterval;

#ifndef SPARK_NO_CLOUD

Expand Down Expand Up @@ -358,6 +359,7 @@ constexpr const char KEY_RESTORE_EVENT[] = "spark/device/key/restore";
constexpr const char DEVICE_UPDATES_EVENT[] = "particle/device/updates/";
constexpr const char FORCED_EVENT[] = "forced";
constexpr const char UPDATES_PENDING_EVENT[] = "pending";
constexpr const char KEEPALIVE_INTERVAL_EVENT[] = "particle/device/keepalive";
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The device service presently already has the [spark/particle]/session/timeout event, which was made before we had UDP, so the name is now somewhat confusing, but it serves to feed the IdleChecker about how long to consider the device offline.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good to know, thanks! I also noticed that the device service expects the interval to be in seconds, while the current code sends it in milliseconds. Are we sure it's safe if Device OS will start publishing session/timeout events? cc @JamesHagerman

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@m-mcgowan @JamesHagerman Pinging here again. Should I change the event name to particle/session/timeout?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For visibility: we discussed this internally and decided to use the new event name for now.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@m-mcgowan Does the rest of the PR look good to you?


inline bool is_suffix(const char* eventName, const char* prefix, const char* suffix) {
// todo - sanity check parameters?
Expand Down Expand Up @@ -1001,6 +1003,7 @@ int Spark_Handshake(bool presence_announce)
}

Send_Firmware_Update_Flags();
publishKeepaliveInterval();

if (presence_announce) {
Multicast_Presence_Announcement();
Expand All @@ -1018,6 +1021,7 @@ int Spark_Handshake(bool presence_announce)

publishSafeModeEventIfNeeded();
Send_Firmware_Update_Flags();
publishKeepaliveInterval();

if (!HAL_RTC_Time_Is_Valid(nullptr) && spark_sync_time_last(nullptr, nullptr) == 0) {
spark_protocol_send_time_request(sp);
Expand Down Expand Up @@ -1114,3 +1118,28 @@ void Spark_Wake(void)
CloudDiagnostics* CloudDiagnostics::instance() {
return &g_cloudDiagnostics;
}

namespace particle {

#ifndef SPARK_NO_CLOUD

/**
 * Publishes the device's keepalive interval to the cloud.
 *
 * @param interval Interval in milliseconds. Pass 0 to publish the interval currently
 *        configured in the protocol layer.
 * @return `true` if the event was published successfully or `false` otherwise.
 */
bool publishKeepaliveInterval(unsigned interval) {
    if (interval == 0) {
        // Query the protocol layer for the currently configured interval
        const int ret = spark_protocol_get_connection_property(spark_protocol_instance(), protocol::Connection::PING,
                &interval, nullptr, nullptr);
        if (ret != 0) {
            return false;
        }
    }
    // TODO: Even though the keepalive interval is not supposed to be changed frequently, it would be
    // nice to make sure the previously published event is either sent or cancelled before publishing
    // a new event. This would help to mitigate the effect of possible out of order delivery
    char intervalStr[16] = {};
    snprintf(intervalStr, sizeof(intervalStr), "%u", interval);
    return publishEvent(KEEPALIVE_INTERVAL_EVENT, intervalStr);
}

#endif // !defined(SPARK_NO_CLOUD)

} // particle
2 changes: 2 additions & 0 deletions system/src/system_cloud_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,8 @@ inline bool publishEvent(const char* event, const char* data = nullptr, unsigned
return spark_send_event(event, data, DEFAULT_CLOUD_EVENT_TTL, flags | PUBLISH_EVENT_FLAG_PRIVATE, nullptr);
}

bool publishKeepaliveInterval(unsigned interval = 0);

} // namespace particle

#ifdef __cplusplus
Expand Down
Loading