Line data Source code
1 : /*
2 : *
3 : * Copyright (c) 2021 Project CHIP Authors
4 : * All rights reserved.
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include <access/AccessRestrictionProvider.h>
20 : #include <access/Privilege.h>
21 : #include <app/AppConfig.h>
22 : #include <app/AttributePathExpandIterator.h>
23 : #include <app/ConcreteEventPath.h>
24 : #include <app/GlobalAttributes.h>
25 : #include <app/InteractionModelEngine.h>
26 : #include <app/MessageDef/StatusIB.h>
27 : #include <app/data-model-provider/ActionReturnStatus.h>
28 : #include <app/data-model-provider/MetadataLookup.h>
29 : #include <app/data-model-provider/MetadataTypes.h>
30 : #include <app/data-model-provider/Provider.h>
31 : #include <app/icd/server/ICDServerConfig.h>
32 : #include <app/reporting/Engine.h>
33 : #include <app/reporting/reporting.h>
34 : #include <app/util/MatterCallbacks.h>
35 : #include <lib/core/CHIPError.h>
36 : #include <lib/core/DataModelTypes.h>
37 : #include <lib/support/CodeUtils.h>
38 : #include <protocols/interaction_model/StatusCode.h>
39 :
40 : #include <optional>
41 :
42 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
43 : #include <app/icd/server/ICDNotifier.h> // nogncheck
44 : #endif
45 :
46 : using namespace chip::Access;
47 :
48 : namespace chip {
49 : namespace app {
50 : namespace reporting {
51 : namespace {
52 :
53 : using DataModel::ReadFlags;
54 : using Protocols::InteractionModel::Status;
55 :
56 : /// Returns the result of ACL validation.
57 : /// If the returned optional has a value, the ACL check failed, the read must
58 : /// not be performed, and the returned error (which may be CHIP_NO_ERROR when
59 : /// dealing with expanded/wildcard paths) should be used as the status for the
60 : /// read.
61 : ///
62 : /// If the returned value is std::nullopt, the ACL check passed and the
63 : /// read should proceed.
64 9848 : std::optional<CHIP_ERROR> ValidateReadAttributeACL(const SubjectDescriptor & subjectDescriptor,
65 : const ConcreteReadAttributePath & path, Privilege requiredPrivilege)
66 : {
67 :
68 9848 : RequestPath requestPath{ .cluster = path.mClusterId,
69 9848 : .endpoint = path.mEndpointId,
70 : .requestType = RequestType::kAttributeReadRequest,
71 9848 : .entityId = path.mAttributeId };
72 :
73 9848 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, requiredPrivilege);
74 9848 : if (err == CHIP_NO_ERROR)
75 : {
76 9847 : return std::nullopt;
77 : }
78 1 : VerifyOrReturnError((err == CHIP_ERROR_ACCESS_DENIED) || (err == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL), err);
79 :
80 : // Implementation of 8.4.3.2 of the spec for path expansion
81 1 : if (path.mExpanded)
82 : {
83 0 : return CHIP_NO_ERROR;
84 : }
85 :
86 : // access denied and access restricted have specific codes for IM
87 1 : return err == CHIP_ERROR_ACCESS_DENIED ? CHIP_IM_GLOBAL_STATUS(UnsupportedAccess) : CHIP_IM_GLOBAL_STATUS(AccessRestricted);
88 : }
89 :
90 : /// Checks that the given attribute path corresponds to a readable attribute. If not, it
91 : /// will return the corresponding failure status.
92 4924 : std::optional<Status> ValidateAttributeIsReadable(DataModel::Provider * dataModel, const ConcreteReadAttributePath & path,
93 : const std::optional<DataModel::AttributeEntry> & entry)
94 : {
95 4924 : if (!entry.has_value())
96 : {
97 1 : return DataModel::ValidateClusterPath(dataModel, path, Status::UnsupportedAttribute);
98 : }
99 :
100 4923 : if (!entry->GetReadPrivilege().has_value())
101 : {
102 0 : return Status::UnsupportedRead;
103 : }
104 :
105 4923 : return std::nullopt;
106 : }
107 :
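 : // Encodes the value (or a failure status) for a single attribute into the report:
 : // runs the ACL checks and readability validation described in the body, then either
 : // reads a supported global attribute from metadata or delegates to the data model
 : // provider. On failure the encoder state is saved so the caller can resume (e.g. for
 : // list chunking).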
108 4925 : DataModel::ActionReturnStatus RetrieveClusterData(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
109 : BitFlags<ReadFlags> flags, AttributeReportIBs::Builder & reportBuilder,
110 : const ConcreteReadAttributePath & path, AttributeEncodeState * encoderState)
111 : {
112 4925 : ChipLogDetail(DataManagement, "<RE:Run> Cluster %" PRIx32 ", Attribute %" PRIx32 " is dirty", path.mClusterId,
113 : path.mAttributeId);
114 4925 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
115 : DataModelCallbacks::OperationOrder::Pre, path);
116 :
117 4925 : DataModel::ReadAttributeRequest readRequest;
118 :
119 4925 : readRequest.readFlags = flags;
120 4925 : readRequest.subjectDescriptor = &subjectDescriptor;
121 4925 : readRequest.path = path;
122 :
123 4925 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
124 :
125 4925 : DataVersion version = 0;
126 4925 : if (auto clusterInfo = serverClusterFinder.Find(path); clusterInfo.has_value())
127 : {
128 4923 : version = clusterInfo->dataVersion;
129 : }
130 : else
131 : {
132 2 : ChipLogError(DataManagement, "Read request on unknown cluster - no data version available");
133 : }
134 :
135 4925 : TLV::TLVWriter checkpoint;
136 4925 : reportBuilder.Checkpoint(checkpoint);
137 :
138 4925 : DataModel::ActionReturnStatus status(CHIP_NO_ERROR);
139 4925 : bool isFabricFiltered = flags.Has(ReadFlags::kFabricFiltered);
140 4925 : AttributeValueEncoder attributeValueEncoder(reportBuilder, subjectDescriptor, path, version, isFabricFiltered, encoderState);
141 :
142 : // TODO: we explicitly DO NOT validate that the path is a valid cluster path (moreover, serverClusterFinder
143 : // above explicitly ignores that case).
144 : // Validation of attribute existence is done after ACL, in `ValidateAttributeIsReadable` below.
145 : //
146 : // See https://github.com/project-chip/connectedhomeip/issues/37410
147 :
148 : // Execute the ACL Access Granting Algorithm before existence checks, assuming the required_privilege for the element is
149 : // View, to determine if the subject would have had at least some access against the concrete path. This is done so we don't
150 : // leak information if we do fail existence checks.
151 :
152 4925 : DataModel::AttributeFinder finder(dataModel);
153 4925 : std::optional<DataModel::AttributeEntry> entry = finder.Find(path);
154 :
155 4925 : if (auto access_status = ValidateReadAttributeACL(subjectDescriptor, path, Privilege::kView); access_status.has_value())
156 : {
157 1 : status = *access_status;
158 : }
159 4924 : else if (auto readable_status = ValidateAttributeIsReadable(dataModel, path, entry); readable_status.has_value())
160 : {
161 1 : status = *readable_status;
162 : }
163 : // Execute the ACL Access Granting Algorithm against the concrete path a second time, using the actual required_privilege.
164 : // entry->GetReadPrivilege() is guaranteed to have a value, since that is checked in the previous branch (inside
165 : // ValidateAttributeIsReadable()).
166 : // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
167 9846 : else if (auto required_privilege_status = ValidateReadAttributeACL(subjectDescriptor, path, entry->GetReadPrivilege().value());
168 4923 : required_privilege_status.has_value())
169 : {
170 0 : status = *required_privilege_status;
171 : }
172 4923 : else if (IsSupportedGlobalAttributeNotInMetadata(readRequest.path.mAttributeId))
173 : {
174 : // Global attributes are NOT directly handled by data model providers, instead
175 : // they are routed through metadata.
176 1402 : status = ReadGlobalAttributeFromMetadata(dataModel, readRequest.path, attributeValueEncoder);
177 : }
178 : else
179 : {
180 3521 : status = dataModel->ReadAttribute(readRequest, attributeValueEncoder);
181 : }
182 :
183 4925 : if (status.IsSuccess())
184 : {
185 : // TODO: this callback being executed only on success is awkward. The Write callback is always done
186 : // for both read and write.
187 : //
188 : // For now this preserves existing/previous code logic; however, we should consider ALWAYS
189 : // calling this.
190 4527 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
191 : DataModelCallbacks::OperationOrder::Post, path);
192 4527 : return status;
193 : }
194 :
195 : // Encoder state is relevant for errors in case they are retryable.
196 : //
197 : // Generally only out-of-space encoding errors are retryable; however, we save the state
198 : // for all errors in case the information is useful (retry or error position).
199 398 : if (encoderState != nullptr)
200 : {
201 398 : *encoderState = attributeValueEncoder.GetState();
202 : }
203 :
204 : #if CHIP_CONFIG_DATA_MODEL_EXTRA_LOGGING
205 : // Out-of-space errors may simply indicate chunked data; reporting those cases would be very
206 : // confusing as they are not true errors. Report only the others (which presumably are not
207 : // recoverable and will be sent to the client as well).
208 398 : if (!status.IsOutOfSpaceEncodingResponse())
209 : {
210 2 : DataModel::ActionReturnStatus::StringStorage storage;
211 2 : ChipLogError(DataManagement, "Failed to read attribute: %s", status.c_str(storage));
212 : }
213 : #endif
214 398 : return status;
215 4925 : }
216 :
217 109 : bool IsClusterDataVersionEqualTo(DataModel::Provider * dataModel, const ConcreteClusterPath & path, DataVersion dataVersion)
218 : {
219 109 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
220 109 : auto info = serverClusterFinder.Find(path);
221 :
222 109 : return info.has_value() && (info->dataVersion == dataVersion);
223 109 : }
224 :
225 : /// Check if the given `err` is a known ACL error that can be translated into
226 : /// a StatusIB (UnsupportedAccess/AccessRestricted)
227 : ///
228 : /// Returns true if the error could be translated and places the result into `outStatus`.
229 : /// `path` is used for logging.
230 110 : bool IsTranslatableAclError(const ConcreteEventPath & path, const CHIP_ERROR & err, StatusIB & outStatus)
231 : {
232 110 : if ((err != CHIP_ERROR_ACCESS_DENIED) && (err != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL))
233 : {
234 108 : return false;
235 : }
236 :
237 2 : ChipLogDetail(InteractionModel, "Access to event (%u, " ChipLogFormatMEI ", " ChipLogFormatMEI ") denied by %s",
238 : path.mEndpointId, ChipLogValueMEI(path.mClusterId), ChipLogValueMEI(path.mEventId),
239 : err == CHIP_ERROR_ACCESS_DENIED ? "ACL" : "ARL");
240 :
241 2 : outStatus = err == CHIP_ERROR_ACCESS_DENIED ? StatusIB(Status::UnsupportedAccess) : StatusIB(Status::AccessRestricted);
242 2 : return true;
243 : }
244 :
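 : // Validates ACL and existence for a concrete event path. On a CHIP_NO_ERROR return,
 : // outStatus holds the IM status to report (Success, UnsupportedAccess/AccessRestricted,
 : // UnsupportedEvent, or a path validation failure). A non-success CHIP_ERROR is returned
 : // only for ACL failures that cannot be translated into an IM status.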
245 56 : CHIP_ERROR CheckEventValidity(const ConcreteEventPath & path, const SubjectDescriptor & subjectDescriptor,
246 : DataModel::Provider * provider, StatusIB & outStatus)
247 : {
248 : // We validate ACL before the path; however, we do not want the real ACL check
249 : // to be blocked by an `Invalid endpoint id` error when checking event info.
250 : // As a result, we check for VIEW privilege on the cluster first (most permissive)
251 : // and will do a 2nd check for the actual required privilege as a follow-up.
252 56 : RequestPath requestPath{
253 56 : .cluster = path.mClusterId,
254 56 : .endpoint = path.mEndpointId,
255 : .requestType = RequestType::kEventReadRequest,
256 56 : };
257 56 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Access::Privilege::kView);
258 56 : if (IsTranslatableAclError(path, err, outStatus))
259 : {
260 2 : return CHIP_NO_ERROR;
261 : }
262 54 : ReturnErrorOnFailure(err);
263 :
264 : DataModel::EventEntry eventInfo;
265 54 : err = provider->EventInfo(path, eventInfo);
266 54 : if (err != CHIP_NO_ERROR)
267 : {
268 : // Cannot get event data to validate; the event is not supported.
269 : // We still fall through into "ValidateClusterPath" to try to return a `better code`
270 : // (i.e. invalid endpoint or cluster); however, if the path seems OK we will
271 : // return UnsupportedEvent since we failed to get event metadata.
272 0 : outStatus = StatusIB(Status::UnsupportedEvent);
273 : }
274 : else
275 : {
276 : // set up the status as "OK" as long as validation below works
277 54 : outStatus = StatusIB(Status::Success);
278 :
279 54 : requestPath.entityId = path.mEventId;
280 :
281 54 : err = GetAccessControl().Check(subjectDescriptor, requestPath, eventInfo.readPrivilege);
282 54 : if (IsTranslatableAclError(path, err, outStatus))
283 : {
284 0 : return CHIP_NO_ERROR;
285 : }
286 54 : ReturnErrorOnFailure(err);
287 : }
288 :
289 54 : Status status = DataModel::ValidateClusterPath(provider, path, Status::Success);
290 54 : if (status != Status::Success)
291 : {
292 : // a valid status available: failure
293 0 : outStatus = StatusIB(status);
294 0 : return CHIP_NO_ERROR;
295 : }
296 :
297 : // Status set above: could be success, but also UnsupportedEvent
298 54 : return CHIP_NO_ERROR;
299 : }
300 :
301 : } // namespace
302 :
303 79 : Engine::Engine(InteractionModelEngine * apImEngine) : mpImEngine(apImEngine) {}
304 :
305 436 : CHIP_ERROR Engine::Init(EventManagement * apEventManagement)
306 : {
307 436 : VerifyOrReturnError(apEventManagement != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
308 436 : mNumReportsInFlight = 0;
309 436 : mCurReadHandlerIdx = 0;
310 436 : mpEventManagement = apEventManagement;
311 :
312 436 : return CHIP_NO_ERROR;
313 : }
314 :
315 311 : void Engine::Shutdown()
316 : {
317 : // Flush out the event buffer synchronously
318 311 : ScheduleUrgentEventDeliverySync();
319 :
320 311 : mNumReportsInFlight = 0;
321 311 : mCurReadHandlerIdx = 0;
322 311 : mGlobalDirtySet.ReleaseAll();
323 311 : }
324 :
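 : // Returns true if at least one data version filter matches the path's endpoint/cluster
 : // and none of the matching filters carries a stale data version, i.e. the client's
 : // cached data for that cluster is still current and the attribute can be skipped.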
325 4722 : bool Engine::IsClusterDataVersionMatch(const SingleLinkedListNode<DataVersionFilter> * aDataVersionFilterList,
326 : const ConcreteReadAttributePath & aPath)
327 : {
328 4722 : bool existPathMatch = false;
329 4722 : bool existVersionMismatch = false;
330 43484 : for (auto filter = aDataVersionFilterList; filter != nullptr; filter = filter->mpNext)
331 : {
332 38762 : if (aPath.mEndpointId == filter->mValue.mEndpointId && aPath.mClusterId == filter->mValue.mClusterId)
333 : {
334 109 : existPathMatch = true;
335 :
336 109 : if (!IsClusterDataVersionEqualTo(mpImEngine->GetDataModelProvider(),
337 218 : ConcreteClusterPath(filter->mValue.mEndpointId, filter->mValue.mClusterId),
338 109 : filter->mValue.mDataVersion.Value()))
339 : {
340 79 : existVersionMismatch = true;
341 : }
342 : }
343 : }
344 4722 : return existPathMatch && !existVersionMismatch;
345 : }
346 :
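 : // Both NO_MEMORY and BUFFER_TOO_SMALL are treated as "the TLV writer ran out of space",
 : // which triggers chunking rather than failing the report.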
347 2500 : static bool IsOutOfWriterSpaceError(CHIP_ERROR err)
348 : {
349 2500 : return err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL;
350 : }
351 :
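 : // Encodes the AttributeReportIBs portion of a single ReportData message: walks the
 : // read handler's attribute paths, skipping paths that are not dirty (for subsequent
 : // reports) or whose data version filter still matches (for priming reports), encodes
 : // each remaining attribute via RetrieveClusterData, and converts out-of-space errors
 : // into chunking.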
352 1985 : CHIP_ERROR Engine::BuildSingleReportDataAttributeReportIBs(ReportDataMessage::Builder & aReportDataBuilder,
353 : ReadHandler * apReadHandler, bool * apHasMoreChunks,
354 : bool * apHasEncodedData)
355 : {
356 1985 : CHIP_ERROR err = CHIP_NO_ERROR;
357 1985 : bool attributeDataWritten = false;
358 1985 : bool hasMoreChunks = true;
359 1985 : TLV::TLVWriter backup;
360 1985 : const uint32_t kReservedSizeEndOfReportIBs = 1;
361 1985 : bool reservedEndOfReportIBs = false;
362 :
363 1985 : aReportDataBuilder.Checkpoint(backup);
364 :
365 1985 : AttributeReportIBs::Builder & attributeReportIBs = aReportDataBuilder.CreateAttributeReportIBs();
366 1985 : size_t emptyReportDataLength = 0;
367 :
368 1985 : SuccessOrExit(err = aReportDataBuilder.GetError());
369 :
370 1985 : emptyReportDataLength = attributeReportIBs.GetWriter()->GetLengthWritten();
371 : //
372 : // Reserve enough space for closing out the Report IB list
373 : //
374 1985 : SuccessOrExit(err = attributeReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
375 1985 : reservedEndOfReportIBs = true;
376 :
377 : {
378 : // TODO: Figure out how AttributePathExpandIterator should handle read
379 : // vs write paths.
380 1985 : ConcreteAttributePath readPath;
381 :
382 1985 : ChipLogDetail(DataManagement,
383 : "Building Reports for ReadHandler with LastReportGeneration = 0x" ChipLogFormatX64
384 : " DirtyGeneration = 0x" ChipLogFormatX64,
385 : ChipLogValueX64(apReadHandler->mPreviousReportsBeginGeneration),
386 : ChipLogValueX64(apReadHandler->mDirtyGeneration));
387 :
388 : // This ReadHandler is not generating reports, so we reset the iterator for a clean start.
389 1985 : if (!apReadHandler->IsReporting())
390 : {
391 1173 : apReadHandler->ResetPathIterator();
392 : }
393 :
394 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
395 1985 : uint32_t attributesRead = 0;
396 : #endif
397 :
398 : // For each path included in the read handler's interested paths...
399 1985 : for (RollbackAttributePathExpandIterator iterator(mpImEngine->GetDataModelProvider(),
400 1985 : apReadHandler->AttributeIterationPosition());
401 6969 : iterator.Next(readPath); iterator.MarkCompleted())
402 : {
403 5400 : if (!apReadHandler->IsPriming())
404 : {
405 678 : bool concretePathDirty = false;
406 : // TODO: Optimize this implementation by making the iterator only emit intersected paths.
407 678 : mGlobalDirtySet.ForEachActiveObject([&](auto * dirtyPath) {
408 815 : if (dirtyPath->IsAttributePathSupersetOf(readPath))
409 : {
410 : // We don't need to worry about paths that were already marked dirty before the last time this read handler
411 : // started a report that it completed: those paths already got reported.
412 252 : if (dirtyPath->mGeneration > apReadHandler->mPreviousReportsBeginGeneration)
413 : {
414 249 : concretePathDirty = true;
415 249 : return Loop::Break;
416 : }
417 : }
418 566 : return Loop::Continue;
419 : });
420 :
421 678 : if (!concretePathDirty)
422 : {
423 : // This attribute is not dirty, we just skip this one.
424 429 : continue;
425 : }
426 : }
427 : else
428 : {
429 4722 : if (IsClusterDataVersionMatch(apReadHandler->GetDataVersionFilterList(), readPath))
430 : {
431 26 : continue;
432 : }
433 : }
434 :
435 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
436 4945 : attributesRead++;
437 4945 : if (attributesRead > mMaxAttributesPerChunk)
438 : {
439 416 : ExitNow(err = CHIP_ERROR_BUFFER_TOO_SMALL);
440 : }
441 : #endif
442 :
443 : // If we are processing a read request, or the initial report of a subscription, just regard all paths as dirty
444 : // paths.
445 4925 : TLV::TLVWriter attributeBackup;
446 4925 : attributeReportIBs.Checkpoint(attributeBackup);
447 4925 : ConcreteReadAttributePath pathForRetrieval(readPath);
448 : // Load the saved state from previous encoding session for chunking of one single attribute (list chunking).
449 4925 : AttributeEncodeState encodeState = apReadHandler->GetAttributeEncodeState();
450 4925 : BitFlags<ReadFlags> flags;
451 4925 : flags.Set(ReadFlags::kFabricFiltered, apReadHandler->IsFabricFiltered());
452 4925 : flags.Set(ReadFlags::kAllowsLargePayload, apReadHandler->AllowsLargePayload());
453 : DataModel::ActionReturnStatus status =
454 4925 : RetrieveClusterData(mpImEngine->GetDataModelProvider(), apReadHandler->GetSubjectDescriptor(), flags,
455 : attributeReportIBs, pathForRetrieval, &encodeState);
456 4925 : if (status.IsError())
457 : {
458 : // Save the operation error, since it will affect early return or status encoding override;
459 : // it will also be used for error reporting below.
460 398 : err = status.GetUnderlyingError();
461 :
462 : // If error is not an "out of writer space" error, rollback and encode status.
463 : // Otherwise, if partial data allowed, save the encode state.
464 : // Otherwise roll back. If we have already encoded some chunks, we are done; otherwise encode status.
465 :
466 398 : if (encodeState.AllowPartialData() && status.IsOutOfSpaceEncodingResponse())
467 : {
468 255 : ChipLogDetail(DataManagement,
469 : "List does not fit in packet, chunk between list items for clusterId: " ChipLogFormatMEI
470 : ", attributeId: " ChipLogFormatMEI,
471 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId));
472 : // Encoding was aborted but partial data is allowed, so we don't roll back; instead we save the state for the next chunk.
473 : // The expectation is that RetrieveClusterData has already reset attributeReportIBs to a good state (rolled
474 : // back any partially-written AttributeReportIB instances, reset its error status). Since AllowPartialData()
475 : // is true, we may not have encoded a complete attribute value, but we did, if we encoded anything, encode a
476 : // set of complete AttributeReportIB instances that represent part of the attribute value.
477 255 : apReadHandler->SetAttributeEncodeState(encodeState);
478 : }
479 : else
480 : {
481 : // We hit an error while writing reports; one common case is running out of buffer. Roll back the
482 : // attributeReportIB to avoid any partial data.
483 143 : attributeReportIBs.Rollback(attributeBackup);
484 143 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
485 :
486 143 : if (!status.IsOutOfSpaceEncodingResponse())
487 : {
488 2 : ChipLogError(DataManagement,
489 : "Fail to retrieve data, roll back and encode status on clusterId: " ChipLogFormatMEI
490 : ", attributeId: " ChipLogFormatMEI "err = %" CHIP_ERROR_FORMAT,
491 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
492 : err.Format());
493 : // Try to encode our error as a status response.
494 2 : err = attributeReportIBs.EncodeAttributeStatus(pathForRetrieval, StatusIB(status.GetStatusCode()));
495 2 : if (err != CHIP_NO_ERROR)
496 : {
497 : // OK, just roll back again and give up; if we still ran out of space we
498 : // will send this status response in the next chunk.
499 0 : attributeReportIBs.Rollback(attributeBackup);
500 : }
501 : }
502 : else
503 : {
504 141 : ChipLogDetail(DataManagement,
505 : "Next attribute value does not fit in packet, roll back on clusterId: " ChipLogFormatMEI
506 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
507 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
508 : err.Format());
509 : }
510 : }
511 : }
512 4925 : SuccessOrExit(err);
513 : // Successfully encoded the attribute, clear the internal state.
514 4529 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
515 1985 : }
516 :
517 : // We just visited all paths this read handler is interested in and did not abort in the middle of iteration, so there are
518 : // no more chunks for this report.
519 1569 : hasMoreChunks = false;
520 : }
521 1985 : exit:
522 1985 : if (attributeReportIBs.GetWriter()->GetLengthWritten() != emptyReportDataLength)
523 : {
524 : // We may encounter BUFFER_TOO_SMALL with nothing actually written for the case of list chunking, so we check whether we
525 : // actually wrote any data before claiming so.
526 1309 : attributeDataWritten = true;
527 : }
528 :
529 1985 : if (apHasEncodedData != nullptr)
530 : {
531 1985 : *apHasEncodedData = attributeDataWritten;
532 : }
533 : //
534 : // Running out of space is an error that we're expected to handle - the incompletely written DataIB has already been rolled back
535 : // earlier to ensure only whole and complete DataIBs are present in the stream.
536 : //
537 : // We can safely clear out the error so that the rest of the machinery to close out the reports, etc. will function correctly.
538 : // These are guaranteed not to fail since we've already reserved memory for the remaining 'close out' TLV operations in this
539 : // function and its callers.
540 : //
541 1985 : if (IsOutOfWriterSpaceError(err) && reservedEndOfReportIBs)
542 : {
543 416 : ChipLogDetail(DataManagement, "<RE:Run> We cannot put more chunks into this report. Enable chunking.");
544 416 : err = CHIP_NO_ERROR;
545 : }
546 :
547 : //
548 : // Only close out the report if we haven't hit an error yet so far.
549 : //
550 1985 : if (err == CHIP_NO_ERROR)
551 : {
552 1985 : attributeReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs);
553 :
554 1985 : err = attributeReportIBs.EndOfAttributeReportIBs();
555 :
556 : //
557 : // We reserved space for this earlier - consequently, the call to end the ReportIBs should
558 : // never fail, so assert if we do since that's a logic bug.
559 : //
560 1985 : VerifyOrDie(err == CHIP_NO_ERROR);
561 : }
562 :
563 : //
564 : // Roll back the entire ReportIB array if we never wrote any attributes
565 : // AND never hit an error.
566 : //
567 1985 : if (!attributeDataWritten && err == CHIP_NO_ERROR)
568 : {
569 676 : aReportDataBuilder.Rollback(backup);
570 : }
571 :
572 : // hasMoreChunks with no data encoded signals that we encountered trouble while processing the attribute.
573 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attributes and no events but hasMoreChunks is
574 : // set.
575 1985 : if (apHasMoreChunks != nullptr)
576 : {
577 1985 : *apHasMoreChunks = hasMoreChunks;
578 : }
579 :
580 1985 : return err;
581 : }
582 :
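 : // For every concrete (non-wildcard) event path requested by the read handler, validates
 : // access and existence and encodes an EventStatusIB for paths that fail, so the client
 : // receives an explicit status instead of silently missing events.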
583 863 : CHIP_ERROR Engine::CheckAccessDeniedEventPaths(TLV::TLVWriter & aWriter, bool & aHasEncodedData, ReadHandler * apReadHandler)
584 : {
585 : using Protocols::InteractionModel::Status;
586 :
587 863 : CHIP_ERROR err = CHIP_NO_ERROR;
588 1756 : for (auto current = apReadHandler->mpEventPathList; current != nullptr;)
589 : {
590 893 : if (current->mValue.IsWildcardPath())
591 : {
592 837 : current = current->mpNext;
593 837 : continue;
594 : }
595 :
596 56 : ConcreteEventPath path(current->mValue.mEndpointId, current->mValue.mClusterId, current->mValue.mEventId);
597 :
598 56 : StatusIB statusIB;
599 :
600 56 : ReturnErrorOnFailure(
601 : CheckEventValidity(path, apReadHandler->GetSubjectDescriptor(), mpImEngine->GetDataModelProvider(), statusIB));
602 :
603 56 : if (statusIB.IsFailure())
604 : {
605 2 : TLV::TLVWriter checkpoint = aWriter;
606 2 : err = EventReportIB::ConstructEventStatusIB(aWriter, path, statusIB);
607 2 : if (err != CHIP_NO_ERROR)
608 : {
609 0 : aWriter = checkpoint;
610 0 : break;
611 : }
612 2 : aHasEncodedData = true;
613 : }
614 :
615 56 : current = current->mpNext;
616 : }
617 :
618 863 : return err;
619 : }
620 :
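 : // Encodes the EventReports portion of a single ReportData message: emits status IBs for
 : // event paths that fail access/existence checks, then fetches events newer than the
 : // handler's event minimum, converting out-of-space errors into chunking.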
621 1985 : CHIP_ERROR Engine::BuildSingleReportDataEventReports(ReportDataMessage::Builder & aReportDataBuilder, ReadHandler * apReadHandler,
622 : bool aBufferIsUsed, bool * apHasMoreChunks, bool * apHasEncodedData)
623 : {
624 1985 : CHIP_ERROR err = CHIP_NO_ERROR;
625 1985 : size_t eventCount = 0;
626 1985 : bool hasEncodedStatus = false;
627 1985 : TLV::TLVWriter backup;
628 1985 : bool eventClean = true;
629 1985 : auto & eventMin = apReadHandler->GetEventMin();
630 1985 : bool hasMoreChunks = false;
631 :
632 1985 : aReportDataBuilder.Checkpoint(backup);
633 :
634 1985 : VerifyOrExit(apReadHandler->GetEventPathList() != nullptr, );
635 :
636 : // If mpEventManagement is not valid or has not been initialized,
637 : // skip the rest of the processing.
638 890 : VerifyOrExit(mpEventManagement != nullptr && mpEventManagement->IsValid(),
639 : ChipLogError(DataManagement, "EventManagement has not yet been initialized"));
640 :
641 887 : eventClean = apReadHandler->CheckEventClean(*mpEventManagement);
642 :
643 : // proceed only if there are new events.
644 887 : if (eventClean)
645 : {
646 24 : ExitNow(); // Read clean, move along
647 : }
648 :
649 : {
650 : // Just like what we do in BuildSingleReportDataAttributeReportIBs(), we need to reserve one byte for end of container tag
651 : // when encoding events to ensure we can close the container successfully.
652 863 : const uint32_t kReservedSizeEndOfReportIBs = 1;
653 863 : EventReportIBs::Builder & eventReportIBs = aReportDataBuilder.CreateEventReports();
654 863 : SuccessOrExit(err = aReportDataBuilder.GetError());
655 863 : VerifyOrExit(eventReportIBs.GetWriter() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
656 863 : SuccessOrExit(err = eventReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
657 :
658 863 : err = CheckAccessDeniedEventPaths(*(eventReportIBs.GetWriter()), hasEncodedStatus, apReadHandler);
659 863 : SuccessOrExit(err);
660 :
661 863 : err = mpEventManagement->FetchEventsSince(*(eventReportIBs.GetWriter()), apReadHandler->GetEventPathList(), eventMin,
662 863 : eventCount, apReadHandler->GetSubjectDescriptor());
663 :
664 863 : if ((err == CHIP_END_OF_TLV) || (err == CHIP_ERROR_TLV_UNDERRUN) || (err == CHIP_NO_ERROR))
665 : {
666 348 : err = CHIP_NO_ERROR;
667 348 : hasMoreChunks = false;
668 : }
669 515 : else if (IsOutOfWriterSpaceError(err))
670 : {
671 : // When the first event is too big to fit in the packet, skip that event.
672 : // However, if we already encoded some attributes into this buffer, we don't skip it in that case.
673 515 : if (eventCount == 0)
674 : {
675 206 : if (!aBufferIsUsed)
676 : {
677 0 : eventMin++;
678 : }
679 206 : ChipLogDetail(DataManagement, "<RE:Run> first cluster event is too big so that it fails to fit in the packet!");
680 206 : err = CHIP_NO_ERROR;
681 : }
682 : else
683 : {
684 : // `FetchEventsSince` has filled the available space
685 : // within the allowed buffer before it fit all the
686 : // available events. This is an expected condition,
687 : // so we do not propagate the error to higher levels;
688 : // instead, we terminate the event processing for now
689 309 : err = CHIP_NO_ERROR;
690 : }
691 515 : hasMoreChunks = true;
692 : }
693 : else
694 : {
695 : // All other errors are propagated to higher level.
696 : // Exiting here and returning an error will lead to
697 : // abandoning subscription.
698 0 : ExitNow();
699 : }
700 :
701 863 : SuccessOrExit(err = eventReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs));
702 863 : SuccessOrExit(err = eventReportIBs.EndOfEventReports());
703 : }
704 863 : ChipLogDetail(DataManagement, "Fetched %u events", static_cast<unsigned int>(eventCount));
705 :
706 0 : exit:
707 1985 : if (apHasEncodedData != nullptr)
708 : {
709 1985 : *apHasEncodedData = hasEncodedStatus || (eventCount != 0);
710 : }
711 :
712 : // Maybe encoding the attributes has already used up all space.
713 1985 : if ((err == CHIP_NO_ERROR || IsOutOfWriterSpaceError(err)) && !(hasEncodedStatus || (eventCount != 0)))
714 : {
715 1345 : aReportDataBuilder.Rollback(backup);
716 1345 : err = CHIP_NO_ERROR;
717 : }
718 :
719 : // hasMoreChunks with no data encoded signals that we encountered trouble while processing the attribute.
720 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attributes and no events but hasMoreChunks is
721 : // set.
722 1985 : if (apHasMoreChunks != nullptr)
723 : {
724 1985 : *apHasMoreChunks = hasMoreChunks;
725 : }
726 1985 : return err;
727 : }
728 :
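 : // Builds one complete ReportData message (attributes followed by events) for the given
 : // read handler within its buffer budget and sends it. Closes the read handler on error,
 : // when a read interaction completes, or when nothing could be encoded at all.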
729 1985 : CHIP_ERROR Engine::BuildAndSendSingleReportData(ReadHandler * apReadHandler)
730 : {
731 1985 : CHIP_ERROR err = CHIP_NO_ERROR;
732 1985 : System::PacketBufferTLVWriter reportDataWriter;
733 1985 : ReportDataMessage::Builder reportDataBuilder;
734 1985 : System::PacketBufferHandle bufHandle = nullptr;
735 1985 : uint16_t reservedSize = 0;
736 1985 : bool hasMoreChunks = false;
737 1985 : bool needCloseReadHandler = false;
738 1985 : size_t reportBufferMaxSize = 0;
739 :
740 : // Reserved size for the MoreChunks boolean flag, which takes up 1 byte for the control tag and 1 byte for the context tag.
741 1985 : const uint32_t kReservedSizeForMoreChunksFlag = 1 + 1;
742 :
743 : // Reserved size for the uint8_t InteractionModelRevision flag, which takes up 1 byte for the control tag and 1 byte for the
744 : // context tag, and 1 byte for the value.
745 1985 : const uint32_t kReservedSizeForIMRevision = 1 + 1 + 1;
746 :
747 : // Reserved size for the end of the report message, which is an end-of-container (i.e. 1 byte for the control tag).
748 1985 : const uint32_t kReservedSizeForEndOfReportMessage = 1;
749 :
750 : // Reserved size for an empty EventReportIBs, so we can at least check if there are any events that need to be reported.
751 1985 : const uint32_t kReservedSizeForEventReportIBs = 3; // type, tag, end of container
752 :
753 1985 : VerifyOrExit(apReadHandler != nullptr, err = CHIP_ERROR_INVALID_ARGUMENT);
754 1985 : VerifyOrExit(apReadHandler->GetSession() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
755 :
756 1985 : reportBufferMaxSize = apReadHandler->GetReportBufferMaxSize();
757 :
758 1985 : bufHandle = System::PacketBufferHandle::New(reportBufferMaxSize);
759 1985 : VerifyOrExit(!bufHandle.IsNull(), err = CHIP_ERROR_NO_MEMORY);
760 :
761 1985 : if (bufHandle->AvailableDataLength() > reportBufferMaxSize)
762 : {
763 0 : reservedSize = static_cast<uint16_t>(bufHandle->AvailableDataLength() - reportBufferMaxSize);
764 : }
765 :
766 1985 : reportDataWriter.Init(std::move(bufHandle));
767 :
768 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
769 1985 : reportDataWriter.ReserveBuffer(mReservedSize);
770 : #endif
771 :
772 : // Always limit the size of the generated packet to fit within the max size returned by the ReadHandler regardless
773 : // of the available buffer capacity.
774 : // Also, we need to reserve some extra space for the MIC field.
775 1985 : reportDataWriter.ReserveBuffer(static_cast<uint32_t>(reservedSize + Crypto::CHIP_CRYPTO_AEAD_MIC_LENGTH_BYTES));
776 :
777 : // Create a report data.
778 1985 : err = reportDataBuilder.Init(&reportDataWriter);
779 1985 : SuccessOrExit(err);
780 :
781 1985 : if (apReadHandler->IsType(ReadHandler::InteractionType::Subscribe))
782 : {
783 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
784 : // Notify the ICDManager that we are about to send a subscription report before we prepare the Report payload.
785 : // This allows the ICDManager to trigger any necessary updates and have the information in the report about to be sent.
786 : app::ICDNotifier::GetInstance().NotifySubscriptionReport();
787 : #endif // CHIP_CONFIG_ENABLE_ICD_SERVER
788 :
789 441 : SubscriptionId subscriptionId = 0;
790 441 : apReadHandler->GetSubscriptionId(subscriptionId);
791 441 : reportDataBuilder.SubscriptionId(subscriptionId);
792 : }
793 :
794 1985 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
795 : kReservedSizeForEndOfReportMessage + kReservedSizeForEventReportIBs));
796 :
797 : {
798 1985 : bool hasMoreChunksForAttributes = false;
799 1985 : bool hasMoreChunksForEvents = false;
800 1985 : bool hasEncodedAttributes = false;
801 1985 : bool hasEncodedEvents = false;
802 :
803 1985 : err = BuildSingleReportDataAttributeReportIBs(reportDataBuilder, apReadHandler, &hasMoreChunksForAttributes,
804 : &hasEncodedAttributes);
805 2016 : SuccessOrExit(err);
806 1985 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForEventReportIBs));
807 1985 : err = BuildSingleReportDataEventReports(reportDataBuilder, apReadHandler, hasEncodedAttributes, &hasMoreChunksForEvents,
808 : &hasEncodedEvents);
809 1985 : SuccessOrExit(err);
810 :
811 1985 : hasMoreChunks = hasMoreChunksForAttributes || hasMoreChunksForEvents;
812 :
813 1985 : if (!hasEncodedAttributes && !hasEncodedEvents && hasMoreChunks)
814 : {
815 31 : ChipLogError(DataManagement,
816 : "No data actually encoded but hasMoreChunks flag is set, close read handler! (attribute too big?)");
817 31 : err = apReadHandler->SendStatusReport(Protocols::InteractionModel::Status::ResourceExhausted);
818 31 : if (err == CHIP_NO_ERROR)
819 : {
820 31 : needCloseReadHandler = true;
821 : }
822 31 : ExitNow();
823 : }
824 : }
825 :
826 1954 : SuccessOrExit(err = reportDataBuilder.GetError());
827 1954 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
828 : kReservedSizeForEndOfReportMessage));
829 1954 : if (hasMoreChunks)
830 : {
831 866 : reportDataBuilder.MoreChunkedMessages(true);
832 : }
833 1088 : else if (apReadHandler->IsType(ReadHandler::InteractionType::Read))
834 : {
835 705 : reportDataBuilder.SuppressResponse(true);
836 : }
837 :
838 1954 : reportDataBuilder.EndOfReportDataMessage();
839 :
840 : //
841 : // Since we've already reserved space for both the MoreChunked/SuppressResponse flags, as well as
842 : // the end-of-container flag for the end of the report, we should never hit an error closing out the message.
843 : //
844 1954 : VerifyOrDie(reportDataBuilder.GetError() == CHIP_NO_ERROR);
845 :
846 1954 : err = reportDataWriter.Finalize(&bufHandle);
847 1954 : SuccessOrExit(err);
848 :
849 1954 : ChipLogDetail(DataManagement, "<RE> Sending report (payload has %" PRIu32 " bytes)...", reportDataWriter.GetLengthWritten());
850 1954 : err = SendReport(apReadHandler, std::move(bufHandle), hasMoreChunks);
851 1954 : VerifyOrExit(err == CHIP_NO_ERROR,
852 : ChipLogError(DataManagement, "<RE> Error sending out report data with %" CHIP_ERROR_FORMAT "!", err.Format()));
853 :
854 1950 : ChipLogDetail(DataManagement, "<RE> ReportsInFlight = %" PRIu32 " with readHandler %" PRIu32 ", RE has %s", mNumReportsInFlight,
855 : mCurReadHandlerIdx, hasMoreChunks ? "more messages" : "no more messages");
856 :
857 0 : exit:
858 1985 : if (err != CHIP_NO_ERROR || (apReadHandler->IsType(ReadHandler::InteractionType::Read) && !hasMoreChunks) ||
859 : needCloseReadHandler)
860 : {
861 : //
862 : // If report generation succeeded and we're on the last chunk of a read, we don't expect
863 : // any further activity on this exchange. The EC layer will automatically close our EC, so shut down the ReadHandler
864 : // gracefully.
865 : //
866 738 : apReadHandler->Close();
867 : }
868 :
869 3970 : return err;
870 1985 : }
871 :
872 1763 : void Engine::Run(System::Layer * aSystemLayer, void * apAppState)
873 : {
874 1763 : Engine * const pEngine = reinterpret_cast<Engine *>(apAppState);
875 1763 : pEngine->mRunScheduled = false;
876 1763 : pEngine->Run();
877 1763 : }
878 :
879 2160 : CHIP_ERROR Engine::ScheduleRun()
880 : {
881 2160 : if (IsRunScheduled())
882 : {
883 397 : return CHIP_NO_ERROR;
884 : }
885 :
886 1763 : Messaging::ExchangeManager * exchangeManager = mpImEngine->GetExchangeManager();
887 1763 : if (exchangeManager == nullptr)
888 : {
889 0 : return CHIP_ERROR_INCORRECT_STATE;
890 : }
891 1763 : SessionManager * sessionManager = exchangeManager->GetSessionManager();
892 1763 : if (sessionManager == nullptr)
893 : {
894 0 : return CHIP_ERROR_INCORRECT_STATE;
895 : }
896 1763 : System::Layer * systemLayer = sessionManager->SystemLayer();
897 1763 : if (systemLayer == nullptr)
898 : {
899 0 : return CHIP_ERROR_INCORRECT_STATE;
900 : }
901 1763 : ReturnErrorOnFailure(systemLayer->ScheduleWork(Run, this));
902 1763 : mRunScheduled = true;
903 1763 : return CHIP_NO_ERROR;
904 : }
905 :
906 2074 : void Engine::Run()
907 : {
908 2074 : uint32_t numReadHandled = 0;
909 :
910 : // We may be deallocating read handlers as we go. Track how many we had
911 : // initially, so we make sure to go through all of them.
912 2074 : size_t initialAllocated = mpImEngine->mReadHandlers.Allocated();
913 4297 : while ((mNumReportsInFlight < CHIP_IM_MAX_REPORTS_IN_FLIGHT) && (numReadHandled < initialAllocated))
914 : {
915 : ReadHandler * readHandler =
916 2227 : mpImEngine->ActiveHandlerAt(mCurReadHandlerIdx % (uint32_t) mpImEngine->mReadHandlers.Allocated());
917 2227 : VerifyOrDie(readHandler != nullptr);
918 :
919 2227 : if (readHandler->ShouldReportUnscheduled() || mpImEngine->GetReportScheduler()->IsReportableNow(readHandler))
920 : {
921 :
922 1984 : mRunningReadHandler = readHandler;
923 1984 : CHIP_ERROR err = BuildAndSendSingleReportData(readHandler);
924 1984 : mRunningReadHandler = nullptr;
925 1984 : if (err != CHIP_NO_ERROR)
926 : {
927 4 : return;
928 : }
929 : }
930 :
931 2223 : numReadHandled++;
932 : // If readHandler removed itself from our list, we also decremented
933 : // mCurReadHandlerIdx to account for that removal, so it's safe to
934 : // increment here.
935 2223 : mCurReadHandlerIdx++;
936 : }
937 :
938 : //
939 : // If our tracker has exceeded the bounds of the handler list, reset it back to 0.
940 : // This isn't strictly necessary, but does make it easier to debug issues in this code if they
941 : // do arise.
942 : //
943 2070 : if (mCurReadHandlerIdx >= mpImEngine->mReadHandlers.Allocated())
944 : {
945 2013 : mCurReadHandlerIdx = 0;
946 : }
947 :
948 2070 : bool allReadClean = true;
949 :
950 2070 : mpImEngine->mReadHandlers.ForEachActiveObject([&allReadClean](ReadHandler * handler) {
951 2878 : if (handler->IsDirty())
952 : {
953 868 : allReadClean = false;
954 868 : return Loop::Break;
955 : }
956 :
957 2010 : return Loop::Continue;
958 : });
959 :
960 2070 : if (allReadClean)
961 : {
962 1202 : ChipLogDetail(DataManagement, "All ReadHandler-s are clean, clear GlobalDirtySet");
963 :
964 1202 : mGlobalDirtySet.ReleaseAll();
965 : }
966 : }
967 :
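 : // Attempts to merge the new dirty path with an existing entry in the global dirty set:
 : // if an existing entry is a superset it is just refreshed, and if the new path is a
 : // superset the existing entry is widened to it. Returns true if a merge happened.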
968 276 : bool Engine::MergeOverlappedAttributePath(const AttributePathParams & aAttributePath)
969 : {
970 276 : return Loop::Break == mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
971 214 : if (path->IsAttributePathSupersetOf(aAttributePath))
972 : {
973 112 : path->mGeneration = GetDirtySetGeneration();
974 112 : return Loop::Break;
975 : }
976 102 : if (aAttributePath.IsAttributePathSupersetOf(*path))
977 : {
978 : // TODO: the wildcard input path may be a superset of later paths in globalDirtySet; this is fine for now, since
979 : // when building the report we use the first path of globalDirtySet to compare against the paths read clients are
980 : // interested in.
981 : // It would be better to eliminate the duplicate wildcard paths in a follow-up.
982 2 : path->mGeneration = GetDirtySetGeneration();
983 2 : path->mEndpointId = aAttributePath.mEndpointId;
984 2 : path->mClusterId = aAttributePath.mClusterId;
985 2 : path->mListIndex = aAttributePath.mListIndex;
986 2 : path->mAttributeId = aAttributePath.mAttributeId;
987 2 : return Loop::Break;
988 : }
989 100 : return Loop::Continue;
990 276 : });
991 : }
992 :
993 8 : bool Engine::ClearTombPaths()
994 : {
995 8 : bool pathReleased = false;
996 8 : mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
997 64 : if (path->mGeneration == 0)
998 : {
999 28 : mGlobalDirtySet.ReleaseObject(path);
1000 28 : pathReleased = true;
1001 : }
1002 64 : return Loop::Continue;
1003 : });
1004 8 : return pathReleased;
1005 : }
1006 :
1007 5 : bool Engine::MergeDirtyPathsUnderSameCluster()
1008 : {
1009 5 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1010 40 : if (outerPath->HasWildcardClusterId() || outerPath->mGeneration == 0)
1011 : {
1012 14 : return Loop::Continue;
1013 : }
1014 26 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1015 208 : if (innerPath == outerPath)
1016 : {
1017 26 : return Loop::Continue;
1018 : }
1019 : // We don't support paths with a wildcard endpoint + a concrete cluster in global dirty set, so we do a simple == check
1020 : // here.
1021 182 : if (innerPath->mEndpointId != outerPath->mEndpointId || innerPath->mClusterId != outerPath->mClusterId)
1022 : {
1023 168 : return Loop::Continue;
1024 : }
1025 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1026 : {
1027 0 : outerPath->mGeneration = innerPath->mGeneration;
1028 : }
1029 14 : outerPath->SetWildcardAttributeId();
1030 :
1031 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1032 : // generation to 0 and then clear it later.
1033 14 : innerPath->mGeneration = 0;
1034 14 : return Loop::Continue;
1035 : });
1036 26 : return Loop::Continue;
1037 : });
1038 :
1039 5 : return ClearTombPaths();
1040 : }
1041 :
1042 3 : bool Engine::MergeDirtyPathsUnderSameEndpoint()
1043 : {
1044 3 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1045 24 : if (outerPath->HasWildcardEndpointId() || outerPath->mGeneration == 0)
1046 : {
1047 14 : return Loop::Continue;
1048 : }
1049 10 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1050 80 : if (innerPath == outerPath)
1051 : {
1052 10 : return Loop::Continue;
1053 : }
1054 70 : if (innerPath->mEndpointId != outerPath->mEndpointId)
1055 : {
1056 56 : return Loop::Continue;
1057 : }
1058 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1059 : {
1060 0 : outerPath->mGeneration = innerPath->mGeneration;
1061 : }
1062 14 : outerPath->SetWildcardClusterId();
1063 14 : outerPath->SetWildcardAttributeId();
1064 :
1065 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1066 : // generation to 0 and then clear it later.
1067 14 : innerPath->mGeneration = 0;
1068 14 : return Loop::Continue;
1069 : });
1070 10 : return Loop::Continue;
1071 : });
1072 3 : return ClearTombPaths();
1073 : }
1074 :
1075 189 : CHIP_ERROR Engine::InsertPathIntoDirtySet(const AttributePathParams & aAttributePath)
1076 : {
1077 189 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1078 :
1079 82 : if (mGlobalDirtySet.Exhausted() && !MergeDirtyPathsUnderSameCluster() && !MergeDirtyPathsUnderSameEndpoint())
1080 : {
1081 1 : ChipLogDetail(DataManagement, "Global dirty set pool exhausted, merge all paths.");
1082 1 : mGlobalDirtySet.ReleaseAll();
1083 1 : auto object = mGlobalDirtySet.CreateObject();
1084 1 : object->mGeneration = GetDirtySetGeneration();
1085 : }
1086 :
1087 82 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1088 79 : ChipLogDetail(DataManagement, "Cannot merge the new path into any existing path, create one.");
1089 :
1090 79 : auto object = mGlobalDirtySet.CreateObject();
1091 79 : if (object == nullptr)
1092 : {
1093 : // This should not happen, this path should be merged into the wildcard endpoint at least.
1094 0 : ChipLogError(DataManagement, "mGlobalDirtySet pool full, cannot handle more entries!");
1095 0 : return CHIP_ERROR_NO_MEMORY;
1096 : }
1097 79 : *object = aAttributePath;
1098 79 : object->mGeneration = GetDirtySetGeneration();
1099 :
1100 79 : return CHIP_NO_ERROR;
1101 : }
1102 :
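 : // Bumps the dirty-set generation, notifies every read handler whose interest set
 : // intersects the path, and records the path in the global dirty set only when at least
 : // one handler is interested.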
1103 5412 : CHIP_ERROR Engine::SetDirty(const AttributePathParams & aAttributePath)
1104 : {
1105 5412 : BumpDirtySetGeneration();
1106 :
1107 5412 : bool intersectsInterestPath = false;
1108 5412 : DataModel::Provider * dataModel = mpImEngine->GetDataModelProvider();
1109 5412 : mpImEngine->mReadHandlers.ForEachActiveObject([&dataModel, &aAttributePath, &intersectsInterestPath](ReadHandler * handler) {
1110 : // We call AttributePathIsDirty for both read interactions and subscribe interactions, since we may send inconsistent
1111 : // attribute data between two chunks. AttributePathIsDirty will not schedule a new run for read handlers which are
1112 : // waiting for a response to the last message chunk for read interactions.
1113 477 : if (handler->CanStartReporting() || handler->IsAwaitingReportResponse())
1114 : {
1115 934 : for (auto object = handler->GetAttributePathList(); object != nullptr; object = object->mpNext)
1116 : {
1117 802 : if (object->mValue.Intersects(aAttributePath))
1118 : {
1119 345 : handler->AttributePathIsDirty(dataModel, aAttributePath);
1120 345 : intersectsInterestPath = true;
1121 345 : break;
1122 : }
1123 : }
1124 : }
1125 :
1126 477 : return Loop::Continue;
1127 : });
1128 :
1129 5412 : if (!intersectsInterestPath)
1130 : {
1131 5228 : return CHIP_NO_ERROR;
1132 : }
1133 184 : ReturnErrorOnFailure(InsertPathIntoDirtySet(aAttributePath));
1134 :
1135 184 : return CHIP_NO_ERROR;
1136 : }
1137 :
1138 1954 : CHIP_ERROR Engine::SendReport(ReadHandler * apReadHandler, System::PacketBufferHandle && aPayload, bool aHasMoreChunks)
1139 : {
1140 1954 : CHIP_ERROR err = CHIP_NO_ERROR;
1141 :
1142 : // We can only have 1 report in flight for any given read - increment and break out.
1143 1954 : mNumReportsInFlight++;
1144 1954 : err = apReadHandler->SendReportData(std::move(aPayload), aHasMoreChunks);
1145 1954 : if (err != CHIP_NO_ERROR)
1146 : {
1147 4 : --mNumReportsInFlight;
1148 : }
1149 1954 : return err;
1150 : }
1151 :
1152 1950 : void Engine::OnReportConfirm()
1153 : {
1154 1950 : VerifyOrDie(mNumReportsInFlight > 0);
1155 :
1156 1950 : if (mNumReportsInFlight == CHIP_IM_MAX_REPORTS_IN_FLIGHT)
1157 : {
1158 : // We could have other things waiting to go now that this report is no
1159 : // longer in flight.
1160 61 : ScheduleRun();
1161 : }
1162 1950 : mNumReportsInFlight--;
1163 1950 : ChipLogDetail(DataManagement, "<RE> OnReportConfirm: NumReports = %" PRIu32, mNumReportsInFlight);
1164 1950 : }
1165 :
1166 20 : void Engine::GetMinEventLogPosition(uint32_t & aMinLogPosition)
1167 : {
1168 20 : mpImEngine->mReadHandlers.ForEachActiveObject([&aMinLogPosition](ReadHandler * handler) {
1169 20 : if (handler->IsType(ReadHandler::InteractionType::Read))
1170 : {
1171 0 : return Loop::Continue;
1172 : }
1173 :
1174 20 : uint32_t initialWrittenEventsBytes = handler->GetLastWrittenEventsBytes();
1175 20 : if (initialWrittenEventsBytes < aMinLogPosition)
1176 : {
1177 20 : aMinLogPosition = initialWrittenEventsBytes;
1178 : }
1179 :
1180 20 : return Loop::Continue;
1181 : });
1182 20 : }
1183 :
1184 20 : CHIP_ERROR Engine::ScheduleBufferPressureEventDelivery(uint32_t aBytesWritten)
1185 : {
1186 20 : uint32_t minEventLogPosition = aBytesWritten;
1187 20 : GetMinEventLogPosition(minEventLogPosition);
1188 20 : if (aBytesWritten - minEventLogPosition > CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD)
1189 : {
1190 0 : ChipLogDetail(DataManagement, "<RE> Buffer overfilled CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD %d, schedule engine run",
1191 : CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD);
1192 0 : return ScheduleRun();
1193 : }
1194 20 : return CHIP_NO_ERROR;
1195 : }
1196 :
1197 663 : CHIP_ERROR Engine::NewEventGenerated(ConcreteEventPath & aPath, uint32_t aBytesConsumed)
1198 : {
1199 : // If we have no read handlers right now that care about any events,
1200 : // we don't need to schedule a run for events.
1201 : // Even if a run were scheduled, no events would be delivered anyway;
1202 : // this just saves one schedule run.
1203 663 : if (mpImEngine->mEventPathPool.Allocated() == 0)
1204 : {
1205 631 : return CHIP_NO_ERROR;
1206 : }
1207 :
1208 32 : bool isUrgentEvent = false;
1209 32 : mpImEngine->mReadHandlers.ForEachActiveObject([&aPath, &isUrgentEvent](ReadHandler * handler) {
1210 40 : if (handler->IsType(ReadHandler::InteractionType::Read))
1211 : {
1212 0 : return Loop::Continue;
1213 : }
1214 :
1215 104 : for (auto * interestedPath = handler->GetEventPathList(); interestedPath != nullptr;
1216 64 : interestedPath = interestedPath->mpNext)
1217 : {
1218 76 : if (interestedPath->mValue.IsEventPathSupersetOf(aPath) && interestedPath->mValue.mIsUrgentEvent)
1219 : {
1220 12 : isUrgentEvent = true;
1221 12 : handler->ForceDirtyState();
1222 12 : break;
1223 : }
1224 : }
1225 :
1226 40 : return Loop::Continue;
1227 : });
1228 :
1229 32 : if (isUrgentEvent)
1230 : {
1231 12 : ChipLogDetail(DataManagement, "Urgent event will be sent once reporting is not blocked by the min interval");
1232 12 : return CHIP_NO_ERROR;
1233 : }
1234 :
1235 20 : return ScheduleBufferPressureEventDelivery(aBytesConsumed);
1236 : }
1237 :
1238 311 : void Engine::ScheduleUrgentEventDeliverySync(Optional<FabricIndex> fabricIndex)
1239 : {
1240 311 : mpImEngine->mReadHandlers.ForEachActiveObject([fabricIndex](ReadHandler * handler) {
1241 0 : if (handler->IsType(ReadHandler::InteractionType::Read))
1242 : {
1243 0 : return Loop::Continue;
1244 : }
1245 :
1246 0 : if (fabricIndex.HasValue() && fabricIndex.Value() != handler->GetAccessingFabricIndex())
1247 : {
1248 0 : return Loop::Continue;
1249 : }
1250 :
1251 0 : handler->ForceDirtyState();
1252 :
1253 0 : return Loop::Continue;
1254 : });
1255 :
1256 311 : Run();
1257 311 : }
1258 :
1259 5146 : void Engine::MarkDirty(const AttributePathParams & path)
1260 : {
1261 5146 : CHIP_ERROR err = SetDirty(path);
1262 5146 : if (err != CHIP_NO_ERROR)
1263 : {
1264 0 : ChipLogError(DataManagement, "Failed to set path dirty: %" CHIP_ERROR_FORMAT, err.Format());
1265 : }
1266 5146 : }
1267 :
1268 : } // namespace reporting
1269 : } // namespace app
1270 : } // namespace chip
|