Line data Source code
1 : /*
2 : *
3 : * Copyright (c) 2021 Project CHIP Authors
4 : * All rights reserved.
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include <access/AccessRestrictionProvider.h>
20 : #include <access/Privilege.h>
21 : #include <app/AppConfig.h>
22 : #include <app/AttributePathExpandIterator.h>
23 : #include <app/ConcreteEventPath.h>
24 : #include <app/GlobalAttributes.h>
25 : #include <app/InteractionModelEngine.h>
26 : #include <app/MessageDef/StatusIB.h>
27 : #include <app/data-model-provider/ActionReturnStatus.h>
28 : #include <app/data-model-provider/AttributeChangeListener.h>
29 : #include <app/data-model-provider/MetadataLookup.h>
30 : #include <app/data-model-provider/MetadataTypes.h>
31 : #include <app/data-model-provider/Provider.h>
32 : #include <app/icd/server/ICDServerConfig.h>
33 : #include <app/reporting/Engine.h>
34 : #include <app/reporting/reporting.h>
35 : #include <app/util/MatterCallbacks.h>
36 : #include <lib/core/CHIPError.h>
37 : #include <lib/core/DataModelTypes.h>
38 : #include <lib/support/CodeUtils.h>
39 : #include <protocols/interaction_model/StatusCode.h>
40 :
41 : #include <optional>
42 :
43 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
44 : #include <app/icd/server/ICDNotifier.h> // nogncheck
45 : #endif
46 :
47 : using namespace chip::Access;
48 :
49 : namespace chip {
50 : namespace app {
51 : namespace reporting {
52 : namespace {
53 :
54 : using DataModel::ReadFlags;
55 : using Protocols::InteractionModel::Status;
56 :
57 : /// Returns the status of ACL validation.
58 : /// If the return value has a status set, that means the ACL check failed,
59 : /// the read must not be performed, and the returned status (which may
60 : /// be success, when dealing with non-concrete paths) should be used
61 : /// as the status for the read.
62 : ///
63 : /// If the returned value is std::nullopt, that means the ACL check passed and the
64 : /// read should proceed.
65 9848 : std::optional<CHIP_ERROR> ValidateReadAttributeACL(const SubjectDescriptor & subjectDescriptor,
66 : const ConcreteReadAttributePath & path, Privilege requiredPrivilege)
67 : {
68 :
69 9848 : RequestPath requestPath{ .cluster = path.mClusterId,
70 9848 : .endpoint = path.mEndpointId,
71 : .requestType = RequestType::kAttributeReadRequest,
72 9848 : .entityId = path.mAttributeId };
73 :
74 9848 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, requiredPrivilege);
75 19696 : if (err == CHIP_NO_ERROR)
76 : {
// ACL check passed: signal the caller to proceed with the read.
77 9847 : return std::nullopt;
78 : }
// Anything other than "access denied" / "restricted by ARL" is an unexpected
// internal failure and is propagated to the caller unchanged.
79 2 : VerifyOrReturnError((err == CHIP_ERROR_ACCESS_DENIED) || (err == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL), err);
80 :
81 : // Implementation of 8.4.3.2 of the spec for path expansion:
// a wildcard-expanded path that fails ACL is silently skipped (success status,
// nothing encoded) rather than reported to the client as an access error.
82 1 : if (path.mExpanded)
83 : {
84 0 : return CHIP_NO_ERROR;
85 : }
86 :
87 : // access denied and access restricted have specific codes for IM
88 2 : return err == CHIP_ERROR_ACCESS_DENIED ? CHIP_IM_GLOBAL_STATUS(UnsupportedAccess) : CHIP_IM_GLOBAL_STATUS(AccessRestricted);
89 : }
90 :
91 : /// Checks that the given attribute path corresponds to a readable attribute. If not, it
92 : /// will return the corresponding failure status.
93 4924 : std::optional<Status> ValidateAttributeIsReadable(DataModel::Provider * dataModel, const ConcreteReadAttributePath & path,
94 : const std::optional<DataModel::AttributeEntry> & entry)
95 : {
96 4924 : if (!entry.has_value())
97 : {
// Attribute not found in metadata: fall back to validating the cluster path so
// the client receives the most specific failure (UnsupportedEndpoint/Cluster vs
// UnsupportedAttribute).
98 1 : return DataModel::ValidateClusterPath(dataModel, path, Status::UnsupportedAttribute);
99 : }
100 :
// Attribute exists but declares no read privilege at all: it is not readable.
101 4923 : if (!entry->GetReadPrivilege().has_value())
102 : {
103 0 : return Status::UnsupportedRead;
104 : }
105 :
// nullopt == attribute exists and is readable; caller may proceed.
106 4923 : return std::nullopt;
107 : }
108 :
// Read a single attribute at `path` and encode it (or an error status) into
// `reportBuilder`. Performs a two-phase ACL check (View privilege first, then the
// attribute's actual required privilege) before existence checks so existence
// information is not leaked to subjects with no access at all. On failure, the
// encoder state is saved into `encoderState` (when non-null) to support retries
// and list chunking.
109 4925 : DataModel::ActionReturnStatus RetrieveClusterData(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
110 : BitFlags<ReadFlags> flags, AttributeReportIBs::Builder & reportBuilder,
111 : const ConcreteReadAttributePath & path, AttributeEncodeState * encoderState)
112 : {
113 4925 : ChipLogDetail(DataManagement, "<RE:Run> Cluster %" PRIx32 ", Attribute %" PRIx32 " is dirty", path.mClusterId,
114 : path.mAttributeId);
115 4925 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
116 : DataModelCallbacks::OperationOrder::Pre, path);
117 :
118 4925 : DataModel::ReadAttributeRequest readRequest(path, subjectDescriptor);
119 :
120 4925 : readRequest.readFlags = flags;
121 :
122 4925 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
123 :
// Look up the cluster's current data version; an unknown cluster reports
// version 0 (the read itself is still attempted, see TODO below).
124 4925 : DataVersion version = 0;
125 4925 : if (auto clusterInfo = serverClusterFinder.Find(path); clusterInfo.has_value())
126 : {
127 4924 : version = clusterInfo->dataVersion;
128 : }
129 : else
130 : {
131 1 : ChipLogError(DataManagement, "Read request on unknown cluster - no data version available");
132 : }
133 :
134 4925 : TLV::TLVWriter checkpoint;
135 4925 : reportBuilder.Checkpoint(checkpoint);
136 :
137 4925 : DataModel::ActionReturnStatus status(CHIP_NO_ERROR);
138 4925 : bool isFabricFiltered = flags.Has(ReadFlags::kFabricFiltered);
139 4925 : AttributeValueEncoder attributeValueEncoder(reportBuilder, subjectDescriptor, path, version, isFabricFiltered, encoderState);
140 :
141 : // TODO: we explicitly DO NOT validate that path is a valid cluster path (even more, above serverClusterFinder
142 : // explicitly ignores that case).
143 : // Validation of attribute existence is done after ACL, in `ValidateAttributeIsReadable` below
144 : //
145 : // See https://github.com/project-chip/connectedhomeip/issues/37410
146 :
147 : // Execute the ACL Access Granting Algorithm before existence checks, assuming the required_privilege for the element is
148 : // View, to determine if the subject would have had at least some access against the concrete path. This is done so we don't
149 : // leak information if we do fail existence checks.
150 :
151 4925 : DataModel::AttributeFinder finder(dataModel);
152 4925 : std::optional<DataModel::AttributeEntry> entry = finder.Find(path);
153 :
154 4925 : if (auto access_status = ValidateReadAttributeACL(subjectDescriptor, path, Privilege::kView); access_status.has_value())
155 : {
156 1 : status = *access_status;
157 : }
158 4924 : else if (auto readable_status = ValidateAttributeIsReadable(dataModel, path, entry); readable_status.has_value())
159 : {
160 1 : status = *readable_status;
161 : }
162 : // Execute the ACL Access Granting Algorithm against the concrete path a second time, using the actual required_privilege.
163 : // entry->GetReadPrivilege() is guaranteed to have a value, since that condition is checked in the previous condition (inside
164 : // ValidateAttributeIsReadable()).
165 : // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
166 9846 : else if (auto required_privilege_status = ValidateReadAttributeACL(subjectDescriptor, path, entry->GetReadPrivilege().value());
167 4923 : required_privilege_status.has_value())
168 : {
169 0 : status = *required_privilege_status;
170 : }
171 4923 : else if (IsSupportedGlobalAttributeNotInMetadata(readRequest.path.mAttributeId))
172 : {
173 : // Global attributes are NOT directly handled by data model providers, instead
174 : // they are routed through metadata.
175 1402 : status = ReadGlobalAttributeFromMetadata(dataModel, readRequest.path, attributeValueEncoder);
176 : }
177 : else
178 : {
179 3521 : status = dataModel->ReadAttribute(readRequest, attributeValueEncoder);
180 : }
181 :
182 4925 : if (status.IsSuccess())
183 : {
184 : // TODO: this callback being only executed on success is awkward. The Write callback is always done
185 : // for both read and write.
186 : //
187 : // For now this preserves existing/previous code logic, however we should consider to ALWAYS
188 : // call this.
189 4527 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
190 : DataModelCallbacks::OperationOrder::Post, path);
191 4527 : return status;
192 : }
193 :
194 : // Encoder state is relevant for errors in case they are retryable.
195 : //
196 : // Generally only out of space encoding errors would be retryable, however we save the state
197 : // for all errors in case this is information that is useful (retry or error position).
198 398 : if (encoderState != nullptr)
199 : {
200 398 : *encoderState = attributeValueEncoder.GetState();
201 : }
202 :
203 : #if CHIP_CONFIG_DATA_MODEL_EXTRA_LOGGING
204 : // Out of space errors may be chunked data, reporting those cases would be very confusing
205 : // as they are not fully errors. Report only others (which presumably are not recoverable
206 : // and will be sent to the client as well).
207 398 : if (!status.IsOutOfSpaceEncodingResponse())
208 : {
209 2 : DataModel::ActionReturnStatus::StringStorage storage;
210 2 : ChipLogError(DataManagement, "Failed to read attribute: %s", status.c_str(storage));
211 : }
212 : #endif
213 398 : return status;
214 4925 : }
215 :
// Returns true iff the cluster at `path` exists and its current data version
// equals `dataVersion` (used for data-version-filter matching).
216 109 : bool IsClusterDataVersionEqualTo(DataModel::Provider * dataModel, const ConcreteClusterPath & path, DataVersion dataVersion)
217 : {
218 109 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
219 109 : auto info = serverClusterFinder.Find(path);
220 :
221 109 : return info.has_value() && (info->dataVersion == dataVersion);
222 109 : }
223 :
224 : /// Check if the given `err` is a known ACL error that can be translated into
225 : /// a StatusIB (UnsupportedAccess/AccessRestricted)
226 : ///
227 : /// Returns true if the error could be translated and places the result into `outStatus`.
228 : /// `path` is used for logging.
229 113 : bool IsTranslatableAclError(const ConcreteEventPath & path, const CHIP_ERROR & err, StatusIB & outStatus)
230 : {
// Only the two ACL/ARL failure codes translate into IM statuses; everything
// else (including CHIP_NO_ERROR) is left for the caller to handle.
231 337 : if ((err != CHIP_ERROR_ACCESS_DENIED) && (err != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL))
232 : {
233 111 : return false;
234 : }
235 :
236 2 : ChipLogDetail(InteractionModel, "Access to event (%u, " ChipLogFormatMEI ", " ChipLogFormatMEI ") denied by %s",
237 : path.mEndpointId, ChipLogValueMEI(path.mClusterId), ChipLogValueMEI(path.mEventId),
238 : err == CHIP_ERROR_ACCESS_DENIED ? "ACL" : "ARL");
239 :
240 4 : outStatus = err == CHIP_ERROR_ACCESS_DENIED ? StatusIB(Status::UnsupportedAccess) : StatusIB(Status::AccessRestricted);
241 2 : return true;
242 : }
243 :
// Validates a concrete event path for `subjectDescriptor`: ACL (View first, then
// the event's actual required privilege, per spec ordering) and path/metadata
// existence. On a validation verdict (pass or fail), returns CHIP_NO_ERROR and
// places the result in `outStatus`; a non-success return indicates an internal
// error unrelated to the path itself.
244 58 : CHIP_ERROR CheckEventValidity(const ConcreteEventPath & path, const SubjectDescriptor & subjectDescriptor,
245 : DataModel::Provider * provider, StatusIB & outStatus)
246 : {
247 : // We validate ACL before Path, however this means we do not want the real ACL check
248 : // to be blocked by a `Invalid endpoint id` error when checking event info.
249 : // As a result, we check for VIEW privilege on the cluster first (most permissive)
250 : // and will do a 2nd check for the actual required privilege as a followup.
251 58 : RequestPath requestPath{
252 58 : .cluster = path.mClusterId,
253 58 : .endpoint = path.mEndpointId,
254 : .requestType = RequestType::kEventReadRequest,
255 58 : .entityId = path.mEventId,
256 58 : };
257 58 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Access::Privilege::kView);
258 58 : if (IsTranslatableAclError(path, err, outStatus))
259 : {
260 2 : return CHIP_NO_ERROR;
261 : }
262 56 : ReturnErrorOnFailure(err);
263 :
264 : DataModel::EventEntry eventInfo;
265 56 : err = provider->EventInfo(path, eventInfo);
266 112 : if (err != CHIP_NO_ERROR)
267 : {
268 : // cannot get event data to validate. Event is not supported.
269 : // we still fall through into "ValidateClusterPath" to try to return a `better code`
270 : // (i.e. say invalid endpoint or cluster), however if path seems ok we will
271 : // return unsupported event as we failed to get event metadata.
272 1 : outStatus = StatusIB(DataModel::ValidateClusterPath(provider, path, Status::UnsupportedEvent));
273 1 : return CHIP_NO_ERROR;
274 : }
275 :
276 : // Although EventInfo() was successful, we still need to Validate Cluster Path since providers MAY return CHIP_NO_ERROR although
277 : // events are unknown.
278 55 : Status status = DataModel::ValidateClusterPath(provider, path, Status::Success);
279 55 : if (status != Status::Success)
280 : {
281 : // a valid status available: failure
282 0 : outStatus = StatusIB(status);
283 0 : return CHIP_NO_ERROR;
284 : }
285 :
286 : // Per spec, the required-privilege ACL check is performed only after path existence is validated
287 55 : err = GetAccessControl().Check(subjectDescriptor, requestPath, eventInfo.readPrivilege);
288 55 : if (IsTranslatableAclError(path, err, outStatus))
289 : {
290 0 : return CHIP_NO_ERROR;
291 : }
292 55 : ReturnErrorOnFailure(err);
293 :
294 : // set up the status as "OK" Since all above checks passed
295 55 : outStatus = StatusIB(Status::Success);
296 :
297 : // Status was set above = Success
298 55 : return CHIP_NO_ERROR;
299 : }
300 :
301 : } // namespace
302 :
// Stores the owning InteractionModelEngine; real setup happens in Init().
303 157 : Engine::Engine(InteractionModelEngine * apImEngine) : mpImEngine(apImEngine) {}
304 :
// Initializes the engine state and binds it to the given EventManagement.
// `apEventManagement` must be non-null; returns CHIP_ERROR_INVALID_ARGUMENT otherwise.
305 477 : CHIP_ERROR Engine::Init(EventManagement * apEventManagement)
306 : {
307 477 : VerifyOrReturnError(apEventManagement != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
308 477 : mNumReportsInFlight = 0;
309 477 : mCurReadHandlerIdx = 0;
310 477 : mpEventManagement = apEventManagement;
311 :
312 477 : return CHIP_NO_ERROR;
313 : }
314 :
// Tears down the engine: synchronously flushes pending urgent events, then
// clears in-flight counters and releases all dirty-set entries.
315 349 : void Engine::Shutdown()
316 : {
317 : // Flush out the event buffer synchronously
318 349 : ScheduleUrgentEventDeliverySync();
319 :
320 349 : mNumReportsInFlight = 0;
321 349 : mCurReadHandlerIdx = 0;
322 349 : mGlobalDirtySet.ReleaseAll();
323 349 : }
324 :
// Returns true iff at least one filter in `aDataVersionFilterList` targets the
// path's endpoint/cluster AND every such matching filter's data version equals
// the cluster's current data version (i.e. the client's cached data is fresh
// and the attribute can be skipped).
325 4722 : bool Engine::IsClusterDataVersionMatch(const SingleLinkedListNode<DataVersionFilter> * aDataVersionFilterList,
326 : const ConcreteReadAttributePath & aPath)
327 : {
328 4722 : bool existPathMatch = false;
329 4722 : bool existVersionMismatch = false;
330 43484 : for (auto filter = aDataVersionFilterList; filter != nullptr; filter = filter->mpNext)
331 : {
332 38762 : if (aPath.mEndpointId == filter->mValue.mEndpointId && aPath.mClusterId == filter->mValue.mClusterId)
333 : {
334 109 : existPathMatch = true;
335 :
336 109 : if (!IsClusterDataVersionEqualTo(mpImEngine->GetDataModelProvider(),
337 218 : ConcreteClusterPath(filter->mValue.mEndpointId, filter->mValue.mClusterId),
338 109 : filter->mValue.mDataVersion.Value()))
339 : {
340 79 : existVersionMismatch = true;
341 : }
342 : }
343 : }
344 4722 : return existPathMatch && !existVersionMismatch;
345 : }
346 :
// True when `err` indicates the TLV writer ran out of buffer space — a
// recoverable condition handled by chunking, not a real failure.
347 2495 : static bool IsOutOfWriterSpaceError(CHIP_ERROR err)
348 : {
349 6574 : return err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL;
350 : }
351 :
// Encodes as many AttributeReportIBs as fit into `aReportDataBuilder` for the
// given read handler. Outputs: `*apHasEncodedData` — whether any attribute data
// was written; `*apHasMoreChunks` — whether iteration stopped early (more
// chunks needed). Out-of-space errors are absorbed (they simply enable
// chunking); other errors are propagated.
352 1980 : CHIP_ERROR Engine::BuildSingleReportDataAttributeReportIBs(ReportDataMessage::Builder & aReportDataBuilder,
353 : ReadHandler * apReadHandler, bool * apHasMoreChunks,
354 : bool * apHasEncodedData)
355 : {
356 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
357 1980 : bool attributeDataWritten = false;
358 1980 : bool hasMoreChunks = true;
359 1980 : TLV::TLVWriter backup;
360 1980 : const uint32_t kReservedSizeEndOfReportIBs = 1;
361 1980 : bool reservedEndOfReportIBs = false;
362 :
363 1980 : aReportDataBuilder.Checkpoint(backup);
364 :
365 1980 : AttributeReportIBs::Builder & attributeReportIBs = aReportDataBuilder.CreateAttributeReportIBs();
366 1980 : size_t emptyReportDataLength = 0;
367 :
368 1980 : SuccessOrExit(err = aReportDataBuilder.GetError());
369 :
370 1980 : emptyReportDataLength = attributeReportIBs.GetWriter()->GetLengthWritten();
371 : //
372 : // Reserve enough space for closing out the Report IB list
373 : //
374 1980 : SuccessOrExit(err = attributeReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
375 1980 : reservedEndOfReportIBs = true;
376 :
377 : {
378 : // TODO: Figure out how AttributePathExpandIterator should handle read
379 : // vs write paths.
380 1980 : ConcreteAttributePath readPath;
381 :
382 1980 : ChipLogDetail(DataManagement,
383 : "Building Reports for ReadHandler with LastReportGeneration = 0x%08lX DirtyGeneration = 0x%08lX",
384 : static_cast<long>(apReadHandler->mPreviousReportsBeginGeneration.Raw()),
385 : static_cast<long>(apReadHandler->mDirtyGeneration.Raw()));
386 :
387 : // This ReadHandler is not generating reports, so we reset the iterator for a clean start.
388 1980 : if (!apReadHandler->IsReporting())
389 : {
390 1168 : apReadHandler->ResetPathIterator();
391 : }
392 :
393 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
394 1980 : uint32_t attributesRead = 0;
395 : #endif
396 :
397 : // For each path included in the interested path of the read handler...
398 1980 : for (RollbackAttributePathExpandIterator iterator(mpImEngine->GetDataModelProvider(),
399 1980 : apReadHandler->AttributeIterationPosition());
400 6958 : iterator.Next(readPath); iterator.MarkCompleted())
401 : {
// Subsequent (non-priming) reports only encode attributes that became dirty
// since the last completed report; priming reports honor data version filters.
402 5394 : if (!apReadHandler->IsPriming())
403 : {
404 672 : bool concretePathDirty = false;
405 : // TODO: Optimize this implementation by making the iterator only emit intersected paths.
406 672 : mGlobalDirtySet.ForEachActiveObject([&](auto * dirtyPath) {
407 815 : if (dirtyPath->IsAttributePathSupersetOf(readPath))
408 : {
409 : // We don't need to worry about paths that were already marked dirty before the last time this read handler
410 : // started a report that it completed: those paths already got reported.
411 252 : if (dirtyPath->mGeneration.After(apReadHandler->mPreviousReportsBeginGeneration))
412 : {
413 249 : concretePathDirty = true;
414 249 : return Loop::Break;
415 : }
416 : }
417 566 : return Loop::Continue;
418 : });
419 :
420 672 : if (!concretePathDirty)
421 : {
422 : // This attribute is not dirty, we just skip this one.
423 423 : continue;
424 : }
425 : }
426 : else
427 : {
428 4722 : if (IsClusterDataVersionMatch(apReadHandler->GetDataVersionFilterList(), readPath))
429 : {
430 26 : continue;
431 : }
432 : }
433 :
434 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
435 4945 : attributesRead++;
436 4945 : if (attributesRead > mMaxAttributesPerChunk)
437 : {
438 416 : ExitNow(err = CHIP_ERROR_BUFFER_TOO_SMALL);
439 : }
440 : #endif
441 :
442 : // If we are processing a read request, or the initial report of a subscription, just regard all paths as dirty
443 : // paths.
444 4925 : TLV::TLVWriter attributeBackup;
445 4925 : attributeReportIBs.Checkpoint(attributeBackup);
446 4925 : ConcreteReadAttributePath pathForRetrieval(readPath);
447 : // Load the saved state from previous encoding session for chunking of one single attribute (list chunking).
448 4925 : AttributeEncodeState encodeState = apReadHandler->GetAttributeEncodeState();
449 4925 : BitFlags<ReadFlags> flags;
450 4925 : flags.Set(ReadFlags::kFabricFiltered, apReadHandler->IsFabricFiltered());
451 4925 : flags.Set(ReadFlags::kAllowsLargePayload, apReadHandler->AllowsLargePayload());
452 : DataModel::ActionReturnStatus status =
453 4925 : RetrieveClusterData(mpImEngine->GetDataModelProvider(), apReadHandler->GetSubjectDescriptor(), flags,
454 : attributeReportIBs, pathForRetrieval, &encodeState);
455 4925 : if (status.IsError())
456 : {
457 : // Operation error set, since this will affect early return or override on status encoding
458 : // it will also be used for error reporting below.
459 398 : err = status.GetUnderlyingError();
460 :
461 : // If error is not an "out of writer space" error, rollback and encode status.
462 : // Otherwise, if partial data allowed, save the encode state.
463 : // Otherwise roll back. If we have already encoded some chunks, we are done; otherwise encode status.
464 :
465 398 : if (encodeState.AllowPartialData() && status.IsOutOfSpaceEncodingResponse())
466 : {
467 255 : ChipLogDetail(DataManagement,
468 : "List does not fit in packet, chunk between list items for clusterId: " ChipLogFormatMEI
469 : ", attributeId: " ChipLogFormatMEI,
470 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId));
471 : // Encoding is aborted but partial data is allowed, then we don't rollback and save the state for next chunk.
472 : // The expectation is that RetrieveClusterData has already reset attributeReportIBs to a good state (rolled
473 : // back any partially-written AttributeReportIB instances, reset its error status). Since AllowPartialData()
474 : // is true, we may not have encoded a complete attribute value, but we did, if we encoded anything, encode a
475 : // set of complete AttributeReportIB instances that represent part of the attribute value.
476 255 : apReadHandler->SetAttributeEncodeState(encodeState);
477 : }
478 : else
479 : {
480 : // We met an error during writing reports; one common case is we are running out of buffer. Roll back the
481 : // attributeReportIB to avoid any partial data.
482 143 : attributeReportIBs.Rollback(attributeBackup);
483 143 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
484 :
485 143 : if (!status.IsOutOfSpaceEncodingResponse())
486 : {
487 2 : ChipLogError(DataManagement,
488 : "Fail to retrieve data, roll back and encode status on clusterId: " ChipLogFormatMEI
489 : ", attributeId: " ChipLogFormatMEI "err = %" CHIP_ERROR_FORMAT,
490 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
491 : err.Format());
492 : // Try to encode our error as a status response.
493 2 : err = attributeReportIBs.EncodeAttributeStatus(pathForRetrieval, StatusIB(status.GetStatusCode()));
494 4 : if (err != CHIP_NO_ERROR)
495 : {
496 : // OK, just roll back again and give up; if we still ran out of space we
497 : // will send this status response in the next chunk.
498 0 : attributeReportIBs.Rollback(attributeBackup);
499 : }
500 : }
501 : else
502 : {
503 141 : ChipLogDetail(DataManagement,
504 : "Next attribute value does not fit in packet, roll back on clusterId: " ChipLogFormatMEI
505 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
506 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
507 : err.Format());
508 : }
509 : }
510 : }
511 4925 : SuccessOrExit(err);
512 : // Successfully encoded the attribute, clear the internal state.
513 4529 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
514 1980 : }
515 :
516 : // We just visited all paths interested by this read handler and did not abort in the middle of iteration, there are no more
517 : // chunks for this report.
518 1564 : hasMoreChunks = false;
519 : }
520 1980 : exit:
521 1980 : if (attributeReportIBs.GetWriter()->GetLengthWritten() != emptyReportDataLength)
522 : {
523 : // We may encounter BUFFER_TOO_SMALL with nothing actually written for the case of list chunking, so we check if we have
524 : // actually written anything before declaring that data was encoded.
525 1309 : attributeDataWritten = true;
526 : }
527 :
528 1980 : if (apHasEncodedData != nullptr)
529 : {
530 1980 : *apHasEncodedData = attributeDataWritten;
531 : }
532 : //
533 : // Running out of space is an error that we're expected to handle - the incompletely written DataIB has already been rolled back
534 : // earlier to ensure only whole and complete DataIBs are present in the stream.
535 : //
536 : // We can safely clear out the error so that the rest of the machinery to close out the reports, etc. will function correctly.
537 : // These are are guaranteed to not fail since we've already reserved memory for the remaining 'close out' TLV operations in this
538 : // function and its callers.
539 : //
540 1980 : if (IsOutOfWriterSpaceError(err) && reservedEndOfReportIBs)
541 : {
542 416 : ChipLogDetail(DataManagement, "<RE:Run> We cannot put more chunks into this report. Enable chunking.");
543 416 : err = CHIP_NO_ERROR;
544 : }
545 :
546 : //
547 : // Only close out the report if we haven't hit an error yet so far.
548 : //
549 3960 : if (err == CHIP_NO_ERROR)
550 : {
551 1980 : TEMPORARY_RETURN_IGNORED attributeReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs);
552 :
553 1980 : err = attributeReportIBs.EndOfAttributeReportIBs();
554 :
555 : //
556 : // We reserved space for this earlier - consequently, the call to end the ReportIBs should
557 : // never fail, so assert if we do since that's a logic bug.
558 : //
559 3960 : VerifyOrDie(err == CHIP_NO_ERROR);
560 : }
561 :
562 : //
563 : // Rollback the entire ReportIB array if we never wrote any attributes
564 : // AND never hit an error.
565 : //
566 2651 : if (!attributeDataWritten && err == CHIP_NO_ERROR)
567 : {
568 671 : aReportDataBuilder.Rollback(backup);
569 : }
570 :
571 : // hasMoreChunks + no data encoded is a flag that we have encountered some trouble when processing the attribute.
572 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
573 : // set.
574 1980 : if (apHasMoreChunks != nullptr)
575 : {
576 1980 : *apHasMoreChunks = hasMoreChunks;
577 : }
578 :
579 1980 : return err;
580 : }
581 :
// For every concrete (non-wildcard) event path subscribed by `apReadHandler`,
// validates ACL/existence and encodes a failure EventStatusIB into `aWriter`
// when the check fails; sets `aHasEncodedData` when any status was written.
// Wildcard paths are skipped here (ACL filtering for them happens during event
// fetching).
582 864 : CHIP_ERROR Engine::CheckAccessDeniedEventPaths(TLV::TLVWriter & aWriter, bool & aHasEncodedData, ReadHandler * apReadHandler)
583 : {
584 : using Protocols::InteractionModel::Status;
585 :
586 864 : CHIP_ERROR err = CHIP_NO_ERROR;
587 1759 : for (auto current = apReadHandler->mpEventPathList; current != nullptr;)
588 : {
589 895 : if (current->mValue.IsWildcardPath())
590 : {
591 837 : current = current->mpNext;
592 837 : continue;
593 : }
594 :
595 58 : ConcreteEventPath path(current->mValue.mEndpointId, current->mValue.mClusterId, current->mValue.mEventId);
596 :
597 58 : StatusIB statusIB;
598 :
599 58 : ReturnErrorOnFailure(
600 : CheckEventValidity(path, apReadHandler->GetSubjectDescriptor(), mpImEngine->GetDataModelProvider(), statusIB));
601 :
602 58 : if (statusIB.IsFailure())
603 : {
// Roll back via checkpoint if the status IB does not fit; it will be
// attempted again in a later chunk.
604 3 : TLV::TLVWriter checkpoint = aWriter;
605 3 : err = EventReportIB::ConstructEventStatusIB(aWriter, path, statusIB);
606 6 : if (err != CHIP_NO_ERROR)
607 : {
608 0 : aWriter = checkpoint;
609 0 : break;
610 : }
611 3 : aHasEncodedData = true;
612 : }
613 :
614 58 : current = current->mpNext;
615 : }
616 :
617 864 : return err;
618 : }
619 :
// Encodes EventReportIBs (access-denied statuses plus buffered events since the
// handler's event minimum) into `aReportDataBuilder`. `aBufferIsUsed` indicates
// attributes were already encoded in this chunk (affects the too-big-first-event
// handling). Outputs mirror BuildSingleReportDataAttributeReportIBs:
// `*apHasEncodedData` and `*apHasMoreChunks`.
620 1980 : CHIP_ERROR Engine::BuildSingleReportDataEventReports(ReportDataMessage::Builder & aReportDataBuilder, ReadHandler * apReadHandler,
621 : bool aBufferIsUsed, bool * apHasMoreChunks, bool * apHasEncodedData)
622 : {
623 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
624 1980 : size_t eventCount = 0;
625 1980 : bool hasEncodedStatus = false;
626 1980 : TLV::TLVWriter backup;
627 1980 : bool eventClean = true;
628 1980 : auto & eventMin = apReadHandler->GetEventMin();
629 1980 : bool hasMoreChunks = false;
630 :
631 1980 : aReportDataBuilder.Checkpoint(backup);
632 :
// Nothing to do when the handler is not interested in any events.
633 1980 : VerifyOrExit(apReadHandler->GetEventPathList() != nullptr, );
634 :
635 : // If the mpEventManagement is not valid or has not been initialized,
636 : // skip the rest of processing
637 891 : VerifyOrExit(mpEventManagement != nullptr && mpEventManagement->IsValid(),
638 : ChipLogError(DataManagement, "EventManagement has not yet initialized"));
639 :
640 888 : eventClean = apReadHandler->CheckEventClean(*mpEventManagement);
641 :
642 : // proceed only if there are new events.
643 888 : if (eventClean)
644 : {
645 24 : ExitNow(); // Read clean, move along
646 : }
647 :
648 : {
649 : // Just like what we do in BuildSingleReportDataAttributeReportIBs(), we need to reserve one byte for end of container tag
650 : // when encoding events to ensure we can close the container successfully.
651 864 : const uint32_t kReservedSizeEndOfReportIBs = 1;
652 864 : EventReportIBs::Builder & eventReportIBs = aReportDataBuilder.CreateEventReports();
653 864 : SuccessOrExit(err = aReportDataBuilder.GetError());
654 864 : VerifyOrExit(eventReportIBs.GetWriter() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
655 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
656 :
657 864 : err = CheckAccessDeniedEventPaths(*(eventReportIBs.GetWriter()), hasEncodedStatus, apReadHandler);
658 864 : SuccessOrExit(err);
659 :
660 864 : err = mpEventManagement->FetchEventsSince(*(eventReportIBs.GetWriter()), apReadHandler->GetEventPathList(), eventMin,
661 864 : eventCount, apReadHandler->GetSubjectDescriptor());
662 :
663 3456 : if ((err == CHIP_END_OF_TLV) || (err == CHIP_ERROR_TLV_UNDERRUN) || (err == CHIP_NO_ERROR))
664 : {
// All pending events were fetched (or the log ended); no more event chunks.
665 349 : err = CHIP_NO_ERROR;
666 349 : hasMoreChunks = false;
667 : }
668 515 : else if (IsOutOfWriterSpaceError(err))
669 : {
670 : // when first cluster event is too big to fit in the packet, ignore that cluster event.
671 : // However, we may have encoded some attributes before, we don't skip it in that case.
672 515 : if (eventCount == 0)
673 : {
674 206 : if (!aBufferIsUsed)
675 : {
676 0 : eventMin++;
677 : }
678 206 : ChipLogDetail(DataManagement, "<RE:Run> first cluster event is too big so that it fails to fit in the packet!");
679 206 : err = CHIP_NO_ERROR;
680 : }
681 : else
682 : {
683 : // `FetchEventsSince` has filled the available space
684 : // within the allowed buffer before it fit all the
685 : // available events. This is an expected condition,
686 : // so we do not propagate the error to higher levels;
687 : // instead, we terminate the event processing for now
688 309 : err = CHIP_NO_ERROR;
689 : }
690 515 : hasMoreChunks = true;
691 : }
692 : else
693 : {
694 : // All other errors are propagated to higher level.
695 : // Exiting here and returning an error will lead to
696 : // abandoning subscription.
697 0 : ExitNow();
698 : }
699 :
700 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs));
701 864 : SuccessOrExit(err = eventReportIBs.EndOfEventReports());
702 : }
703 864 : ChipLogDetail(DataManagement, "Fetched %u events", static_cast<unsigned int>(eventCount));
704 :
705 864 : exit:
706 1980 : if (apHasEncodedData != nullptr)
707 : {
708 1980 : *apHasEncodedData = hasEncodedStatus || (eventCount != 0);
709 : }
710 :
711 : // Maybe encoding the attributes has already used up all space.
712 3960 : if ((err == CHIP_NO_ERROR || IsOutOfWriterSpaceError(err)) && !(hasEncodedStatus || (eventCount != 0)))
713 : {
714 1339 : aReportDataBuilder.Rollback(backup);
715 1339 : err = CHIP_NO_ERROR;
716 : }
717 :
718 : // hasMoreChunks + no data encoded is a flag that we have encountered some trouble when processing the attribute.
719 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
720 : // set.
721 1980 : if (apHasMoreChunks != nullptr)
722 : {
723 1980 : *apHasMoreChunks = hasMoreChunks;
724 : }
725 1980 : return err;
726 : }
727 :
728 1980 : CHIP_ERROR Engine::BuildAndSendSingleReportData(ReadHandler * apReadHandler)
729 : {
730 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
731 1980 : System::PacketBufferTLVWriter reportDataWriter;
732 1980 : ReportDataMessage::Builder reportDataBuilder;
733 1980 : System::PacketBufferHandle bufHandle = nullptr;
734 1980 : uint16_t reservedSize = 0;
735 1980 : bool hasMoreChunks = false;
736 1980 : bool needCloseReadHandler = false;
737 1980 : size_t reportBufferMaxSize = 0;
738 :
739 : // Reserved size for the MoreChunks boolean flag, which takes up 1 byte for the control tag and 1 byte for the context tag.
740 1980 : const uint32_t kReservedSizeForMoreChunksFlag = 1 + 1;
741 :
742 : // Reserved size for the uint8_t InteractionModelRevision flag, which takes up 1 byte for the control tag and 1 byte for the
743 : // context tag, 1 byte for value
744 1980 : const uint32_t kReservedSizeForIMRevision = 1 + 1 + 1;
745 :
746 : // Reserved size for the end of report message, which is an end-of-container (i.e 1 byte for the control tag).
747 1980 : const uint32_t kReservedSizeForEndOfReportMessage = 1;
748 :
749 : // Reserved size for an empty EventReportIBs, so we can at least check if there are any events need to be reported.
750 1980 : const uint32_t kReservedSizeForEventReportIBs = 3; // type, tag, end of container
751 :
752 1980 : VerifyOrExit(apReadHandler != nullptr, err = CHIP_ERROR_INVALID_ARGUMENT);
753 1980 : VerifyOrExit(apReadHandler->GetSession() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
754 :
755 1980 : reportBufferMaxSize = apReadHandler->GetReportBufferMaxSize();
756 :
757 1980 : bufHandle = System::PacketBufferHandle::New(reportBufferMaxSize);
758 1980 : VerifyOrExit(!bufHandle.IsNull(), err = CHIP_ERROR_NO_MEMORY);
759 :
760 1980 : if (bufHandle->AvailableDataLength() > reportBufferMaxSize)
761 : {
762 0 : reservedSize = static_cast<uint16_t>(bufHandle->AvailableDataLength() - reportBufferMaxSize);
763 : }
764 :
765 1980 : reportDataWriter.Init(std::move(bufHandle));
766 :
767 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
768 1980 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(mReservedSize));
769 : #endif
770 :
771 : // Always limit the size of the generated packet to fit within the max size returned by the ReadHandler regardless
772 : // of the available buffer capacity.
773 : // Also, we need to reserve some extra space for the MIC field.
774 1980 : SuccessOrExit(
775 : err = reportDataWriter.ReserveBuffer(static_cast<uint32_t>(reservedSize + Crypto::CHIP_CRYPTO_AEAD_MIC_LENGTH_BYTES)));
776 :
777 : // Create a report data.
778 1980 : err = reportDataBuilder.Init(&reportDataWriter);
779 1980 : SuccessOrExit(err);
780 :
781 1980 : if (apReadHandler->IsType(ReadHandler::InteractionType::Subscribe))
782 : {
783 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
784 : // Notify the ICDManager that we are about to send a subscription report before we prepare the Report payload.
785 : // This allows the ICDManager to trigger any necessary updates and have the information in the report about to be sent.
786 : app::ICDNotifier::GetInstance().NotifySubscriptionReport();
787 : #endif // CHIP_CONFIG_ENABLE_ICD_SERVER
788 :
789 435 : SubscriptionId subscriptionId = 0;
790 435 : apReadHandler->GetSubscriptionId(subscriptionId);
791 435 : reportDataBuilder.SubscriptionId(subscriptionId);
792 : }
793 :
794 1980 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
795 : kReservedSizeForEndOfReportMessage + kReservedSizeForEventReportIBs));
796 :
797 : {
798 1980 : bool hasMoreChunksForAttributes = false;
799 1980 : bool hasMoreChunksForEvents = false;
800 1980 : bool hasEncodedAttributes = false;
801 1980 : bool hasEncodedEvents = false;
802 :
803 1980 : err = BuildSingleReportDataAttributeReportIBs(reportDataBuilder, apReadHandler, &hasMoreChunksForAttributes,
804 : &hasEncodedAttributes);
805 2011 : SuccessOrExit(err);
806 1980 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForEventReportIBs));
807 1980 : err = BuildSingleReportDataEventReports(reportDataBuilder, apReadHandler, hasEncodedAttributes, &hasMoreChunksForEvents,
808 : &hasEncodedEvents);
809 1980 : SuccessOrExit(err);
810 :
811 1980 : hasMoreChunks = hasMoreChunksForAttributes || hasMoreChunksForEvents;
812 :
813 1980 : if (!hasEncodedAttributes && !hasEncodedEvents && hasMoreChunks)
814 : {
815 31 : ChipLogError(DataManagement,
816 : "No data actually encoded but hasMoreChunks flag is set, close read handler! (attribute too big?)");
817 31 : err = apReadHandler->SendStatusReport(Protocols::InteractionModel::Status::ResourceExhausted);
818 62 : if (err == CHIP_NO_ERROR)
819 : {
820 31 : needCloseReadHandler = true;
821 : }
822 31 : ExitNow();
823 : }
824 : }
825 :
826 1949 : SuccessOrExit(err = reportDataBuilder.GetError());
827 1949 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
828 : kReservedSizeForEndOfReportMessage));
829 1949 : if (hasMoreChunks)
830 : {
831 866 : reportDataBuilder.MoreChunkedMessages(true);
832 : }
833 1083 : else if (apReadHandler->IsType(ReadHandler::InteractionType::Read))
834 : {
835 706 : reportDataBuilder.SuppressResponse(true);
836 : }
837 :
838 : //
839 : // Since we've already reserved space for both the MoreChunked/SuppressResponse flags, as well as
840 : // the end-of-container flag for the end of the report, we should never hit an error closing out the message.
841 : //
842 1949 : SuccessOrDie(reportDataBuilder.EndOfReportDataMessage());
843 :
844 1949 : err = reportDataWriter.Finalize(&bufHandle);
845 1949 : SuccessOrExit(err);
846 :
847 1949 : ChipLogDetail(DataManagement, "<RE> Sending report (payload has %" PRIu32 " bytes)...", reportDataWriter.GetLengthWritten());
848 1949 : err = SendReport(apReadHandler, std::move(bufHandle), hasMoreChunks);
849 1949 : SuccessOrExitAction(
850 : err, ChipLogError(DataManagement, "<RE> Error sending out report data with %" CHIP_ERROR_FORMAT "!", err.Format()));
851 :
852 1945 : ChipLogDetail(DataManagement, "<RE> ReportsInFlight = %" PRIu32 " with readHandler %" PRIu32 ", RE has %s", mNumReportsInFlight,
853 : mCurReadHandlerIdx, hasMoreChunks ? "more messages" : "no more messages");
854 :
855 1945 : exit:
856 3960 : if (err != CHIP_NO_ERROR || (apReadHandler->IsType(ReadHandler::InteractionType::Read) && !hasMoreChunks) ||
857 : needCloseReadHandler)
858 : {
859 : //
860 : // In the case of successful report generation and we're on the last chunk of a read, we don't expect
861 : // any further activity on this exchange. The EC layer will automatically close our EC, so shutdown the ReadHandler
862 : // gracefully.
863 : //
864 739 : apReadHandler->Close();
865 : }
866 :
867 3960 : return err;
868 1980 : }
869 :
870 1751 : void Engine::Run(System::Layer * aSystemLayer, void * apAppState)
871 : {
872 1751 : Engine * const pEngine = reinterpret_cast<Engine *>(apAppState);
873 1751 : pEngine->mRunScheduled = false;
874 1751 : pEngine->Run();
875 1751 : }
876 :
877 2149 : CHIP_ERROR Engine::ScheduleRun()
878 : {
879 2149 : if (IsRunScheduled())
880 : {
881 398 : return CHIP_NO_ERROR;
882 : }
883 :
884 1751 : Messaging::ExchangeManager * exchangeManager = mpImEngine->GetExchangeManager();
885 1751 : if (exchangeManager == nullptr)
886 : {
887 0 : return CHIP_ERROR_INCORRECT_STATE;
888 : }
889 1751 : SessionManager * sessionManager = exchangeManager->GetSessionManager();
890 1751 : if (sessionManager == nullptr)
891 : {
892 0 : return CHIP_ERROR_INCORRECT_STATE;
893 : }
894 1751 : System::Layer * systemLayer = sessionManager->SystemLayer();
895 1751 : if (systemLayer == nullptr)
896 : {
897 0 : return CHIP_ERROR_INCORRECT_STATE;
898 : }
899 3502 : ReturnErrorOnFailure(systemLayer->ScheduleWork(Run, this));
900 1751 : mRunScheduled = true;
901 1751 : return CHIP_NO_ERROR;
902 : }
903 :
// Performs one reporting pass: round-robins over the active ReadHandler pool,
// building and sending a report for each handler that may report now, bounded by
// CHIP_IM_MAX_REPORTS_IN_FLIGHT. When every handler ends up clean, the global
// dirty set is released.
void Engine::Run()
{
    uint32_t numReadHandled = 0;

    // We may be deallocating read handlers as we go. Track how many we had
    // initially, so we make sure to go through all of them.
    size_t initialAllocated = mpImEngine->mReadHandlers.Allocated();
    while ((mNumReportsInFlight < CHIP_IM_MAX_REPORTS_IN_FLIGHT) && (numReadHandled < initialAllocated))
    {
        // Round-robin selection: mCurReadHandlerIdx persists across runs, modulo the
        // current pool size (which may have shrunk since initialAllocated was read).
        ReadHandler * readHandler =
            mpImEngine->ActiveHandlerAt(mCurReadHandlerIdx % (uint32_t) mpImEngine->mReadHandlers.Allocated());
        VerifyOrDie(readHandler != nullptr);

        if (readHandler->ShouldReportUnscheduled() || mpImEngine->GetReportScheduler()->IsReportableNow(readHandler))
        {

            mRunningReadHandler = readHandler;
            CHIP_ERROR err = BuildAndSendSingleReportData(readHandler);
            mRunningReadHandler = nullptr;
            // A send failure aborts this pass; remaining handlers are serviced on a
            // later scheduled run.
            if (err != CHIP_NO_ERROR)
            {
                return;
            }
        }

        numReadHandled++;
        // If readHandler removed itself from our list, we also decremented
        // mCurReadHandlerIdx to account for that removal, so it's safe to
        // increment here.
        mCurReadHandlerIdx++;
    }

    //
    // If our tracker has exceeded the bounds of the handler list, reset it back to 0.
    // This isn't strictly necessary, but does make it easier to debug issues in this code if they
    // do arise.
    //
    if (mCurReadHandlerIdx >= mpImEngine->mReadHandlers.Allocated())
    {
        mCurReadHandlerIdx = 0;
    }

    bool allReadClean = true;

    // The dirty set only needs to be retained while at least one handler still has
    // dirty state to report.
    mpImEngine->mReadHandlers.ForEachActiveObject([&allReadClean](ReadHandler * handler) {
        if (handler->IsDirty())
        {
            allReadClean = false;
            return Loop::Break;
        }

        return Loop::Continue;
    });

    if (allReadClean)
    {
        ChipLogDetail(DataManagement, "All ReadHandler-s are clean, clear GlobalDirtySet");

        mGlobalDirtySet.ReleaseAll();
    }
}
965 :
// Attempts to merge aAttributePath into an existing entry of the global dirty set.
// Returns true if a merge happened (no new entry is needed), false otherwise.
bool Engine::MergeOverlappedAttributePath(const AttributePathParams & aAttributePath)
{
    return Loop::Break == mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
        // Existing entry already covers the new path: just refresh its generation.
        if (path->IsAttributePathSupersetOf(aAttributePath))
        {
            path->mGeneration = GetDirtySetGeneration();
            return Loop::Break;
        }
        // New path covers this existing entry: widen the entry in place.
        if (aAttributePath.IsAttributePathSupersetOf(*path))
        {
            // TODO: the wildcard input path may be superset of next paths in globalDirtySet, it is fine at this moment, since
            // when building report, it would use the first path of globalDirtySet to compare against interested paths read clients
            // want.
            // It is better to eliminate the duplicate wildcard paths in follow-up
            path->mGeneration = GetDirtySetGeneration();
            path->mEndpointId = aAttributePath.mEndpointId;
            path->mClusterId = aAttributePath.mClusterId;
            path->mListIndex = aAttributePath.mListIndex;
            path->mAttributeId = aAttributePath.mAttributeId;
            return Loop::Break;
        }
        return Loop::Continue;
    });
}
990 :
991 8 : bool Engine::ClearTombPaths()
992 : {
993 8 : bool pathReleased = false;
994 8 : mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
995 64 : if (path->mGeneration.IsZero())
996 : {
997 28 : mGlobalDirtySet.ReleaseObject(path);
998 28 : pathReleased = true;
999 : }
1000 64 : return Loop::Continue;
1001 : });
1002 8 : return pathReleased;
1003 : }
1004 :
// Compacts the global dirty set by collapsing entries that share the same
// (endpoint, cluster) pair into a single wildcard-attribute entry. Absorbed
// entries are tombstoned (generation cleared to 0) during the nested iteration
// and released afterwards via ClearTombPaths. Returns true if any entry was freed.
bool Engine::MergeDirtyPathsUnderSameCluster()
{
    mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
        // Skip entries that are already cluster-wildcard or already tombstoned.
        if (outerPath->HasWildcardClusterId() || outerPath->mGeneration.IsZero())
        {
            return Loop::Continue;
        }
        mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
            if (innerPath == outerPath)
            {
                return Loop::Continue;
            }
            // We don't support paths with a wildcard endpoint + a concrete cluster in global dirty set, so we do a simple == check
            // here.
            if (innerPath->mEndpointId != outerPath->mEndpointId || innerPath->mClusterId != outerPath->mClusterId)
            {
                return Loop::Continue;
            }
            // The surviving (outer) entry keeps the newest generation of the pair.
            if (innerPath->mGeneration.After(outerPath->mGeneration))
            {
                outerPath->mGeneration = innerPath->mGeneration;
            }
            outerPath->SetWildcardAttributeId();

            // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
            // generation to 0 and then clear it later.
            innerPath->mGeneration.Clear();
            return Loop::Continue;
        });
        return Loop::Continue;
    });

    return ClearTombPaths();
}
1039 :
// Coarser compaction than MergeDirtyPathsUnderSameCluster: collapses entries that
// share an endpoint into a single wildcard-cluster/wildcard-attribute entry.
// Absorbed entries are tombstoned during the nested iteration and released
// afterwards via ClearTombPaths. Returns true if any entry was freed.
bool Engine::MergeDirtyPathsUnderSameEndpoint()
{
    mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
        // Skip entries that are already endpoint-wildcard or already tombstoned.
        if (outerPath->HasWildcardEndpointId() || outerPath->mGeneration.IsZero())
        {
            return Loop::Continue;
        }
        mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
            if (innerPath == outerPath)
            {
                return Loop::Continue;
            }
            if (innerPath->mEndpointId != outerPath->mEndpointId)
            {
                return Loop::Continue;
            }
            // The surviving (outer) entry keeps the newest generation of the pair.
            if (innerPath->mGeneration.After(outerPath->mGeneration))
            {
                outerPath->mGeneration = innerPath->mGeneration;
            }
            outerPath->SetWildcardClusterId();
            outerPath->SetWildcardAttributeId();

            // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
            // generation to 0 and then clear it later.
            innerPath->mGeneration.Clear();
            return Loop::Continue;
        });
        return Loop::Continue;
    });
    return ClearTombPaths();
}
1072 :
// Records aAttributePath in the global dirty set: merge into an existing entry when
// possible, otherwise allocate a new entry, compacting the pool first if it is full.
CHIP_ERROR Engine::InsertPathIntoDirtySet(const AttributePathParams & aAttributePath)
{
    // Fast path: the new path overlaps an existing entry and was merged into it.
    VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);

    // Pool is full: try progressively coarser compaction (per-cluster, then
    // per-endpoint). If neither frees an entry, collapse everything into a single
    // full-wildcard entry.
    if (mGlobalDirtySet.Exhausted() && !MergeDirtyPathsUnderSameCluster() && !MergeDirtyPathsUnderSameEndpoint())
    {
        ChipLogDetail(DataManagement, "Global dirty set pool exhausted, merge all paths.");
        mGlobalDirtySet.ReleaseAll();
        // Allocation cannot fail here: the pool was just emptied. The default-constructed
        // entry acts as the all-wildcard path (see the "wildcard endpoint" note below).
        auto object = mGlobalDirtySet.CreateObject();
        object->mGeneration = GetDirtySetGeneration();
    }

    // Retry the merge: the compaction above may have produced a covering entry.
    VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
    ChipLogDetail(DataManagement, "Cannot merge the new path into any existing path, create one.");

    auto object = mGlobalDirtySet.CreateObject();
    if (object == nullptr)
    {
        // This should not happen, this path should be merged into the wildcard endpoint at least.
        ChipLogError(DataManagement, "mGlobalDirtySet pool full, cannot handle more entries!");
        return CHIP_ERROR_NO_MEMORY;
    }
    *object = aAttributePath;
    object->mGeneration = GetDirtySetGeneration();

    return CHIP_NO_ERROR;
}
1100 :
1101 5554 : CHIP_ERROR Engine::SetDirty(const AttributePathParams & aAttributePath)
1102 : {
1103 5554 : BumpDirtySetGeneration();
1104 :
1105 5554 : bool intersectsInterestPath = false;
1106 5554 : DataModel::Provider * dataModel = mpImEngine->GetDataModelProvider();
1107 5554 : mpImEngine->mReadHandlers.ForEachActiveObject([&dataModel, &aAttributePath, &intersectsInterestPath](ReadHandler * handler) {
1108 : // We call AttributePathIsDirty for both read interactions and subscribe interactions, since we may send inconsistent
1109 : // attribute data between two chunks. AttributePathIsDirty will not schedule a new run for read handlers which are
1110 : // waiting for a response to the last message chunk for read interactions.
1111 481 : if (handler->CanStartReporting() || handler->IsAwaitingReportResponse())
1112 : {
1113 941 : for (auto object = handler->GetAttributePathList(); object != nullptr; object = object->mpNext)
1114 : {
1115 808 : if (object->mValue.Intersects(aAttributePath))
1116 : {
1117 348 : handler->AttributePathIsDirty(dataModel, aAttributePath);
1118 348 : intersectsInterestPath = true;
1119 348 : break;
1120 : }
1121 : }
1122 : }
1123 :
1124 481 : return Loop::Continue;
1125 : });
1126 :
1127 5554 : if (!intersectsInterestPath)
1128 : {
1129 5367 : return CHIP_NO_ERROR;
1130 : }
1131 187 : ReturnErrorOnFailure(InsertPathIntoDirtySet(aAttributePath));
1132 :
1133 187 : return CHIP_NO_ERROR;
1134 : }
1135 :
1136 1949 : CHIP_ERROR Engine::SendReport(ReadHandler * apReadHandler, System::PacketBufferHandle && aPayload, bool aHasMoreChunks)
1137 : {
1138 1949 : CHIP_ERROR err = CHIP_NO_ERROR;
1139 :
1140 : // We can only have 1 report in flight for any given read - increment and break out.
1141 1949 : mNumReportsInFlight++;
1142 1949 : err = apReadHandler->SendReportData(std::move(aPayload), aHasMoreChunks);
1143 3898 : if (err != CHIP_NO_ERROR)
1144 : {
1145 4 : --mNumReportsInFlight;
1146 : }
1147 1949 : return err;
1148 : }
1149 :
// Called when a previously sent report has completed its exchange. Releases the
// in-flight slot and, if the engine was saturated at the in-flight limit,
// schedules another run since capacity is about to become available.
void Engine::OnReportConfirm()
{
    // A confirm without a matching in-flight report is a programming error.
    VerifyOrDie(mNumReportsInFlight > 0);

    if (mNumReportsInFlight == CHIP_IM_MAX_REPORTS_IN_FLIGHT)
    {
        // We could have other things waiting to go now that this report is no
        // longer in flight.
        TEMPORARY_RETURN_IGNORED ScheduleRun();
    }
    mNumReportsInFlight--;
    ChipLogDetail(DataManagement, "<RE> OnReportConfirm: NumReports = %" PRIu32, mNumReportsInFlight);
}
1163 :
1164 20 : void Engine::GetMinEventLogPosition(uint32_t & aMinLogPosition)
1165 : {
1166 20 : mpImEngine->mReadHandlers.ForEachActiveObject([&aMinLogPosition](ReadHandler * handler) {
1167 20 : if (handler->IsType(ReadHandler::InteractionType::Read))
1168 : {
1169 0 : return Loop::Continue;
1170 : }
1171 :
1172 20 : uint32_t initialWrittenEventsBytes = handler->GetLastWrittenEventsBytes();
1173 20 : if (initialWrittenEventsBytes < aMinLogPosition)
1174 : {
1175 20 : aMinLogPosition = initialWrittenEventsBytes;
1176 : }
1177 :
1178 20 : return Loop::Continue;
1179 : });
1180 20 : }
1181 :
1182 20 : CHIP_ERROR Engine::ScheduleBufferPressureEventDelivery(uint32_t aBytesWritten)
1183 : {
1184 20 : uint32_t minEventLogPosition = aBytesWritten;
1185 20 : GetMinEventLogPosition(minEventLogPosition);
1186 20 : if (aBytesWritten - minEventLogPosition > CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD)
1187 : {
1188 0 : ChipLogDetail(DataManagement, "<RE> Buffer overfilled CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD %d, schedule engine run",
1189 : CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD);
1190 0 : return ScheduleRun();
1191 : }
1192 20 : return CHIP_NO_ERROR;
1193 : }
1194 :
// Notification that a new event was logged. If any subscription has an urgent
// interest matching aPath, those handlers are force-dirtied (reporting happens as
// soon as min-interval allows); otherwise a run may be scheduled based on
// event-buffer pressure.
//
// @param aPath          Path of the freshly generated event.
// @param aBytesConsumed Total bytes written to the event log so far.
CHIP_ERROR Engine::NewEventGenerated(ConcreteEventPath & aPath, uint32_t aBytesConsumed)
{
    // If we literally have no read handlers right now that care about any events,
    // we don't need to call schedule run for event.
    // If schedule run is called, actually we would not delivery events as well.
    // Just wanna save one schedule run here
    if (mpImEngine->mEventPathPool.Allocated() == 0)
    {
        return CHIP_NO_ERROR;
    }

    bool isUrgentEvent = false;
    mpImEngine->mReadHandlers.ForEachActiveObject([&aPath, &isUrgentEvent](ReadHandler * handler) {
        // Urgent delivery only applies to subscriptions, not one-shot reads.
        if (handler->IsType(ReadHandler::InteractionType::Read))
        {
            return Loop::Continue;
        }

        for (auto * interestedPath = handler->GetEventPathList(); interestedPath != nullptr;
             interestedPath = interestedPath->mpNext)
        {
            // A matching urgent interest forces the handler dirty so the event goes
            // out on the next permissible report.
            if (interestedPath->mValue.IsEventPathSupersetOf(aPath) && interestedPath->mValue.mIsUrgentEvent)
            {
                isUrgentEvent = true;
                handler->ForceDirtyState();
                break;
            }
        }

        return Loop::Continue;
    });

    if (isUrgentEvent)
    {
        ChipLogDetail(DataManagement, "Urgent event will be sent once reporting is not blocked by the min interval");
        return CHIP_NO_ERROR;
    }

    return ScheduleBufferPressureEventDelivery(aBytesConsumed);
}
1235 :
1236 349 : void Engine::ScheduleUrgentEventDeliverySync(Optional<FabricIndex> fabricIndex)
1237 : {
1238 349 : mpImEngine->mReadHandlers.ForEachActiveObject([fabricIndex](ReadHandler * handler) {
1239 0 : if (handler->IsType(ReadHandler::InteractionType::Read))
1240 : {
1241 0 : return Loop::Continue;
1242 : }
1243 :
1244 0 : if (fabricIndex.HasValue() && fabricIndex.Value() != handler->GetAccessingFabricIndex())
1245 : {
1246 0 : return Loop::Continue;
1247 : }
1248 :
1249 0 : handler->ForceDirtyState();
1250 :
1251 0 : return Loop::Continue;
1252 : });
1253 :
1254 349 : Run();
1255 349 : }
1256 :
1257 5329 : void Engine::OnAttributeChanged(const ConcreteAttributePath & path, DataModel::AttributeChangeType type)
1258 : {
1259 5329 : VerifyOrReturn(type == DataModel::AttributeChangeType::kReportable);
1260 :
1261 5329 : CHIP_ERROR err = SetDirty({ path.mEndpointId, path.mClusterId, path.mAttributeId });
1262 10658 : if (err != CHIP_NO_ERROR)
1263 : {
1264 0 : ChipLogError(DataManagement, "Failed to set path dirty: %" CHIP_ERROR_FORMAT, err.Format());
1265 : }
1266 : }
1267 :
// Data-model change listener: an endpoint changed, so mark the whole endpoint
// dirty (wildcard cluster/attribute under endpointId).
// NOTE(review): `type` is currently unused — every endpoint change type results in
// a full-endpoint dirty mark; confirm whether some types should be filtered.
void Engine::OnEndpointChanged(EndpointId endpointId, DataModel::EndpointChangeType type)
{
    CHIP_ERROR err = SetDirty(AttributePathParams(endpointId));
    if (err != CHIP_NO_ERROR)
    {
        ChipLogError(DataManagement, "Failed to set endpoint %u dirty: %" CHIP_ERROR_FORMAT, endpointId, err.Format());
    }
}
1276 :
1277 : } // namespace reporting
1278 : } // namespace app
1279 : } // namespace chip
|