Line data Source code
1 : /*
2 : *
3 : * Copyright (c) 2021 Project CHIP Authors
4 : * All rights reserved.
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include <access/AccessRestrictionProvider.h>
20 : #include <access/Privilege.h>
21 : #include <app/AppConfig.h>
22 : #include <app/AttributePathExpandIterator.h>
23 : #include <app/ConcreteEventPath.h>
24 : #include <app/GlobalAttributes.h>
25 : #include <app/InteractionModelEngine.h>
26 : #include <app/MessageDef/StatusIB.h>
27 : #include <app/data-model-provider/ActionReturnStatus.h>
28 : #include <app/data-model-provider/MetadataLookup.h>
29 : #include <app/data-model-provider/MetadataTypes.h>
30 : #include <app/data-model-provider/Provider.h>
31 : #include <app/icd/server/ICDServerConfig.h>
32 : #include <app/reporting/Engine.h>
33 : #include <app/reporting/reporting.h>
34 : #include <app/util/MatterCallbacks.h>
35 : #include <lib/core/CHIPError.h>
36 : #include <lib/core/DataModelTypes.h>
37 : #include <lib/support/CodeUtils.h>
38 : #include <protocols/interaction_model/StatusCode.h>
39 :
40 : #include <optional>
41 :
42 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
43 : #include <app/icd/server/ICDNotifier.h> // nogncheck
44 : #endif
45 :
46 : using namespace chip::Access;
47 :
48 : namespace chip {
49 : namespace app {
50 : namespace reporting {
51 : namespace {
52 :
53 : using DataModel::ReadFlags;
54 : using Protocols::InteractionModel::Status;
55 :
56 : /// Returns the status of ACL validation.
57 : /// If the return value has a status set, that means the ACL check failed,
58 : /// the read must not be performed, and the returned status (which may
59 : /// be success, when dealing with non-concrete paths) should be used
60 : /// as the status for the read.
61 : ///
62 : /// If the returned value is std::nullopt, that means the ACL check passed and the
63 : /// read should proceed.
64 9848 : std::optional<CHIP_ERROR> ValidateReadAttributeACL(const SubjectDescriptor & subjectDescriptor,
65 : const ConcreteReadAttributePath & path, Privilege requiredPrivilege)
66 : {
67 :
68 9848 : RequestPath requestPath{ .cluster = path.mClusterId,
69 9848 : .endpoint = path.mEndpointId,
70 : .requestType = RequestType::kAttributeReadRequest,
71 9848 : .entityId = path.mAttributeId };
72 :
73 9848 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, requiredPrivilege);
74 19696 : if (err == CHIP_NO_ERROR)
75 : {
76 9847 : return std::nullopt;
77 : }
78 2 : VerifyOrReturnError((err == CHIP_ERROR_ACCESS_DENIED) || (err == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL), err);
79 :
80 : // Implementation of 8.4.3.2 of the spec for path expansion
81 1 : if (path.mExpanded)
82 : {
83 0 : return CHIP_NO_ERROR;
84 : }
85 :
86 : // access denied and access restricted have specific codes for IM
87 2 : return err == CHIP_ERROR_ACCESS_DENIED ? CHIP_IM_GLOBAL_STATUS(UnsupportedAccess) : CHIP_IM_GLOBAL_STATUS(AccessRestricted);
88 : }
89 :
90 : /// Checks that the given attribute path corresponds to a readable attribute. If not, it
91 : /// will return the corresponding failure status.
92 4924 : std::optional<Status> ValidateAttributeIsReadable(DataModel::Provider * dataModel, const ConcreteReadAttributePath & path,
93 : const std::optional<DataModel::AttributeEntry> & entry)
94 : {
95 4924 : if (!entry.has_value())
96 : {
97 1 : return DataModel::ValidateClusterPath(dataModel, path, Status::UnsupportedAttribute);
98 : }
99 :
100 4923 : if (!entry->GetReadPrivilege().has_value())
101 : {
102 0 : return Status::UnsupportedRead;
103 : }
104 :
105 4923 : return std::nullopt;
106 : }
107 :
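 : /// Reads the attribute identified by `path` on behalf of `subjectDescriptor` and encodes either
 : /// the attribute data or an appropriate failure status into `reportBuilder`, performing the ACL,
 : /// existence and readability checks described below. Returns the resulting action status.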
108 4925 : DataModel::ActionReturnStatus RetrieveClusterData(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
109 : BitFlags<ReadFlags> flags, AttributeReportIBs::Builder & reportBuilder,
110 : const ConcreteReadAttributePath & path, AttributeEncodeState * encoderState)
111 : {
112 4925 : ChipLogDetail(DataManagement, "<RE:Run> Cluster %" PRIx32 ", Attribute %" PRIx32 " is dirty", path.mClusterId,
113 : path.mAttributeId);
114 4925 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
115 : DataModelCallbacks::OperationOrder::Pre, path);
116 :
117 4925 : DataModel::ReadAttributeRequest readRequest;
118 :
119 4925 : readRequest.readFlags = flags;
120 4925 : readRequest.subjectDescriptor = &subjectDescriptor;
121 4925 : readRequest.path = path;
122 :
123 4925 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
124 :
125 4925 : DataVersion version = 0;
126 4925 : if (auto clusterInfo = serverClusterFinder.Find(path); clusterInfo.has_value())
127 : {
128 4924 : version = clusterInfo->dataVersion;
129 : }
130 : else
131 : {
132 1 : ChipLogError(DataManagement, "Read request on unknown cluster - no data version available");
133 : }
134 :
135 4925 : TLV::TLVWriter checkpoint;
136 4925 : reportBuilder.Checkpoint(checkpoint);
137 :
138 4925 : DataModel::ActionReturnStatus status(CHIP_NO_ERROR);
139 4925 : bool isFabricFiltered = flags.Has(ReadFlags::kFabricFiltered);
140 4925 : AttributeValueEncoder attributeValueEncoder(reportBuilder, subjectDescriptor, path, version, isFabricFiltered, encoderState);
141 :
142 : // TODO: we explicitly DO NOT validate that the path is a valid cluster path (moreover, the serverClusterFinder
143 : // above explicitly ignores that case).
144 : // Validation of attribute existence is done after the ACL check, in `ValidateAttributeIsReadable` below.
145 : //
146 : // See https://github.com/project-chip/connectedhomeip/issues/37410
147 :
148 : // Execute the ACL Access Granting Algorithm before existence checks, assuming the required_privilege for the element is
149 : // View, to determine if the subject would have had at least some access against the concrete path. This is done so we don't
150 : // leak information if we do fail existence checks.
151 :
152 4925 : DataModel::AttributeFinder finder(dataModel);
153 4925 : std::optional<DataModel::AttributeEntry> entry = finder.Find(path);
154 :
155 4925 : if (auto access_status = ValidateReadAttributeACL(subjectDescriptor, path, Privilege::kView); access_status.has_value())
156 : {
157 1 : status = *access_status;
158 : }
159 4924 : else if (auto readable_status = ValidateAttributeIsReadable(dataModel, path, entry); readable_status.has_value())
160 : {
161 1 : status = *readable_status;
162 : }
163 : // Execute the ACL Access Granting Algorithm against the concrete path a second time, using the actual required_privilege.
164 : // entry->GetReadPrivilege() is guaranteed to have a value, since that condition is checked in the previous condition (inside
165 : // ValidateAttributeIsReadable()).
166 : // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
167 9846 : else if (auto required_privilege_status = ValidateReadAttributeACL(subjectDescriptor, path, entry->GetReadPrivilege().value());
168 4923 : required_privilege_status.has_value())
169 : {
170 0 : status = *required_privilege_status;
171 : }
172 4923 : else if (IsSupportedGlobalAttributeNotInMetadata(readRequest.path.mAttributeId))
173 : {
174 : // Global attributes are NOT directly handled by data model providers, instead
175 : // they are routed through metadata.
176 1402 : status = ReadGlobalAttributeFromMetadata(dataModel, readRequest.path, attributeValueEncoder);
177 : }
178 : else
179 : {
180 3521 : status = dataModel->ReadAttribute(readRequest, attributeValueEncoder);
181 : }
182 :
183 4925 : if (status.IsSuccess())
184 : {
185 : // TODO: this callback being only executed on success is awkward. The Write callback is always done
186 : // for both read and write.
187 : //
188 : // For now this preserves existing/previous code logic, however we should consider to ALWAYS
189 : // call this.
190 4527 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
191 : DataModelCallbacks::OperationOrder::Post, path);
192 4527 : return status;
193 : }
194 :
195 : // Encoder state is relevant for errors in case they are retryable.
196 : //
197 : // Generally only out-of-space encoding errors are retryable; however, we save the state
198 : // for all errors in case the information is useful (retry or error position).
199 398 : if (encoderState != nullptr)
200 : {
201 398 : *encoderState = attributeValueEncoder.GetState();
202 : }
203 :
204 : #if CHIP_CONFIG_DATA_MODEL_EXTRA_LOGGING
205 : // Out-of-space errors may simply indicate chunked data; reporting those cases would be very confusing
206 : // as they are not real errors. Report only the others (which presumably are not recoverable
207 : // and will be sent to the client as well).
208 398 : if (!status.IsOutOfSpaceEncodingResponse())
209 : {
210 2 : DataModel::ActionReturnStatus::StringStorage storage;
211 2 : ChipLogError(DataManagement, "Failed to read attribute: %s", status.c_str(storage));
212 : }
213 : #endif
214 398 : return status;
215 4925 : }
216 :
217 109 : bool IsClusterDataVersionEqualTo(DataModel::Provider * dataModel, const ConcreteClusterPath & path, DataVersion dataVersion)
218 : {
219 109 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
220 109 : auto info = serverClusterFinder.Find(path);
221 :
222 109 : return info.has_value() && (info->dataVersion == dataVersion);
223 109 : }
224 :
225 : /// Check if the given `err` is a known ACL error that can be translated into
226 : /// a StatusIB (UnsupportedAccess/AccessRestricted)
227 : ///
228 : /// Returns true if the error could be translated and places the result into `outStatus`.
229 : /// `path` is used for logging.
230 113 : bool IsTranslatableAclError(const ConcreteEventPath & path, const CHIP_ERROR & err, StatusIB & outStatus)
231 : {
232 337 : if ((err != CHIP_ERROR_ACCESS_DENIED) && (err != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL))
233 : {
234 111 : return false;
235 : }
236 :
237 4 : ChipLogDetail(InteractionModel, "Access to event (%u, " ChipLogFormatMEI ", " ChipLogFormatMEI ") denied by %s",
238 : path.mEndpointId, ChipLogValueMEI(path.mClusterId), ChipLogValueMEI(path.mEventId),
239 : err == CHIP_ERROR_ACCESS_DENIED ? "ACL" : "ARL");
240 :
241 4 : outStatus = err == CHIP_ERROR_ACCESS_DENIED ? StatusIB(Status::UnsupportedAccess) : StatusIB(Status::AccessRestricted);
242 2 : return true;
243 : }
244 :
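 : /// Validates that the given concrete event path is readable by the subject: ACL check with View
 : /// privilege, path/event existence check, then ACL check with the event's required privilege.
 : /// ACL and path failures are reported through `outStatus` with CHIP_NO_ERROR returned; any other
 : /// error is returned to the caller directly.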
245 58 : CHIP_ERROR CheckEventValidity(const ConcreteEventPath & path, const SubjectDescriptor & subjectDescriptor,
246 : DataModel::Provider * provider, StatusIB & outStatus)
247 : {
248 : // We validate ACL before the path. However, this means we do not want the real ACL check
249 : // to be blocked by an `Invalid endpoint id` error when checking event info.
250 : // As a result, we check for VIEW privilege on the cluster first (most permissive)
251 : // and do a second check for the actual required privilege as a follow-up.
252 58 : RequestPath requestPath{
253 58 : .cluster = path.mClusterId,
254 58 : .endpoint = path.mEndpointId,
255 : .requestType = RequestType::kEventReadRequest,
256 58 : .entityId = path.mEventId,
257 58 : };
258 58 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Access::Privilege::kView);
259 58 : if (IsTranslatableAclError(path, err, outStatus))
260 : {
261 2 : return CHIP_NO_ERROR;
262 : }
263 56 : ReturnErrorOnFailure(err);
264 :
265 : DataModel::EventEntry eventInfo;
266 56 : err = provider->EventInfo(path, eventInfo);
267 112 : if (err != CHIP_NO_ERROR)
268 : {
269 : // Cannot get event data to validate, so the event is not supported.
270 : // We still fall through into "ValidateClusterPath" to try to return a `better code`
271 : // (i.e. invalid endpoint or cluster); however, if the path seems ok we will
272 : // return UnsupportedEvent, as we failed to get event metadata.
273 1 : outStatus = StatusIB(DataModel::ValidateClusterPath(provider, path, Status::UnsupportedEvent));
274 1 : return CHIP_NO_ERROR;
275 : }
276 :
277 : // Although EventInfo() was successful, we still need to validate the cluster path, since providers MAY return
278 : // CHIP_NO_ERROR even though the event is unknown.
279 55 : Status status = DataModel::ValidateClusterPath(provider, path, Status::Success);
280 55 : if (status != Status::Success)
281 : {
282 : // A valid (failure) status is available.
283 0 : outStatus = StatusIB(status);
284 0 : return CHIP_NO_ERROR;
285 : }
286 :
287 : // Per spec, the required-privilege ACL check is performed only after path existence is validated
288 55 : err = GetAccessControl().Check(subjectDescriptor, requestPath, eventInfo.readPrivilege);
289 55 : if (IsTranslatableAclError(path, err, outStatus))
290 : {
291 0 : return CHIP_NO_ERROR;
292 : }
293 55 : ReturnErrorOnFailure(err);
294 :
295 : // Set the status to Success since all checks above passed.
296 55 : outStatus = StatusIB(Status::Success);
297 :
298 : // Status was set to Success above.
299 55 : return CHIP_NO_ERROR;
300 : }
301 :
302 : } // namespace
303 :
304 97 : Engine::Engine(InteractionModelEngine * apImEngine) : mpImEngine(apImEngine) {}
305 :
306 473 : CHIP_ERROR Engine::Init(EventManagement * apEventManagement)
307 : {
308 473 : VerifyOrReturnError(apEventManagement != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
309 473 : mNumReportsInFlight = 0;
310 473 : mCurReadHandlerIdx = 0;
311 473 : mpEventManagement = apEventManagement;
312 :
313 473 : return CHIP_NO_ERROR;
314 : }
315 :
316 347 : void Engine::Shutdown()
317 : {
318 : // Flush out the event buffer synchronously
319 347 : ScheduleUrgentEventDeliverySync();
320 :
321 347 : mNumReportsInFlight = 0;
322 347 : mCurReadHandlerIdx = 0;
323 347 : mGlobalDirtySet.ReleaseAll();
324 347 : }
325 :
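 : // Returns true if the given read path matches at least one entry in the data version filter list
 : // and none of the matching entries has a data version different from the current cluster data version.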
326 4722 : bool Engine::IsClusterDataVersionMatch(const SingleLinkedListNode<DataVersionFilter> * aDataVersionFilterList,
327 : const ConcreteReadAttributePath & aPath)
328 : {
329 4722 : bool existPathMatch = false;
330 4722 : bool existVersionMismatch = false;
331 43484 : for (auto filter = aDataVersionFilterList; filter != nullptr; filter = filter->mpNext)
332 : {
333 38762 : if (aPath.mEndpointId == filter->mValue.mEndpointId && aPath.mClusterId == filter->mValue.mClusterId)
334 : {
335 109 : existPathMatch = true;
336 :
337 109 : if (!IsClusterDataVersionEqualTo(mpImEngine->GetDataModelProvider(),
338 218 : ConcreteClusterPath(filter->mValue.mEndpointId, filter->mValue.mClusterId),
339 109 : filter->mValue.mDataVersion.Value()))
340 : {
341 79 : existVersionMismatch = true;
342 : }
343 : }
344 : }
345 4722 : return existPathMatch && !existVersionMismatch;
346 : }
347 :
348 2495 : static bool IsOutOfWriterSpaceError(CHIP_ERROR err)
349 : {
350 6574 : return err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL;
351 : }
352 :
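 : // Encodes AttributeReportIBs for the attribute paths this read handler cares about (all interested
 : // paths while priming, only dirty paths otherwise) into the report, reporting whether any data was
 : // encoded and whether more chunks are needed because the buffer ran out of space.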
353 1980 : CHIP_ERROR Engine::BuildSingleReportDataAttributeReportIBs(ReportDataMessage::Builder & aReportDataBuilder,
354 : ReadHandler * apReadHandler, bool * apHasMoreChunks,
355 : bool * apHasEncodedData)
356 : {
357 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
358 1980 : bool attributeDataWritten = false;
359 1980 : bool hasMoreChunks = true;
360 1980 : TLV::TLVWriter backup;
361 1980 : const uint32_t kReservedSizeEndOfReportIBs = 1;
362 1980 : bool reservedEndOfReportIBs = false;
363 :
364 1980 : aReportDataBuilder.Checkpoint(backup);
365 :
366 1980 : AttributeReportIBs::Builder & attributeReportIBs = aReportDataBuilder.CreateAttributeReportIBs();
367 1980 : size_t emptyReportDataLength = 0;
368 :
369 1980 : SuccessOrExit(err = aReportDataBuilder.GetError());
370 :
371 1980 : emptyReportDataLength = attributeReportIBs.GetWriter()->GetLengthWritten();
372 : //
373 : // Reserve enough space for closing out the Report IB list
374 : //
375 1980 : SuccessOrExit(err = attributeReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
376 1980 : reservedEndOfReportIBs = true;
377 :
378 : {
379 : // TODO: Figure out how AttributePathExpandIterator should handle read
380 : // vs write paths.
381 1980 : ConcreteAttributePath readPath;
382 :
383 1980 : ChipLogDetail(DataManagement,
384 : "Building Reports for ReadHandler with LastReportGeneration = 0x%08lX DirtyGeneration = 0x%08lX",
385 : static_cast<long>(apReadHandler->mPreviousReportsBeginGeneration.Raw()),
386 : static_cast<long>(apReadHandler->mDirtyGeneration.Raw()));
387 :
388 : // This ReadHandler is not generating reports, so we reset the iterator for a clean start.
389 1980 : if (!apReadHandler->IsReporting())
390 : {
391 1168 : apReadHandler->ResetPathIterator();
392 : }
393 :
394 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
395 1980 : uint32_t attributesRead = 0;
396 : #endif
397 :
398 : // For each path included in the read handler's interested paths...
399 1980 : for (RollbackAttributePathExpandIterator iterator(mpImEngine->GetDataModelProvider(),
400 1980 : apReadHandler->AttributeIterationPosition());
401 6958 : iterator.Next(readPath); iterator.MarkCompleted())
402 : {
403 5394 : if (!apReadHandler->IsPriming())
404 : {
405 672 : bool concretePathDirty = false;
406 : // TODO: Optimize this implementation by making the iterator only emit intersected paths.
407 672 : mGlobalDirtySet.ForEachActiveObject([&](auto * dirtyPath) {
408 815 : if (dirtyPath->IsAttributePathSupersetOf(readPath))
409 : {
410 : // We don't need to worry about paths that were already marked dirty before the last time this read handler
411 : // started a report that it completed: those paths already got reported.
412 252 : if (dirtyPath->mGeneration.After(apReadHandler->mPreviousReportsBeginGeneration))
413 : {
414 249 : concretePathDirty = true;
415 249 : return Loop::Break;
416 : }
417 : }
418 566 : return Loop::Continue;
419 : });
420 :
421 672 : if (!concretePathDirty)
422 : {
423 : // This attribute is not dirty, so we just skip it.
424 423 : continue;
425 : }
426 : }
427 : else
428 : {
429 4722 : if (IsClusterDataVersionMatch(apReadHandler->GetDataVersionFilterList(), readPath))
430 : {
431 26 : continue;
432 : }
433 : }
434 :
435 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
436 4945 : attributesRead++;
437 4945 : if (attributesRead > mMaxAttributesPerChunk)
438 : {
439 416 : ExitNow(err = CHIP_ERROR_BUFFER_TOO_SMALL);
440 : }
441 : #endif
442 :
443 : // If we are processing a read request, or the initial report of a subscription, just regard all paths as dirty
444 : // paths.
445 4925 : TLV::TLVWriter attributeBackup;
446 4925 : attributeReportIBs.Checkpoint(attributeBackup);
447 4925 : ConcreteReadAttributePath pathForRetrieval(readPath);
448 : // Load the saved state from previous encoding session for chunking of one single attribute (list chunking).
449 4925 : AttributeEncodeState encodeState = apReadHandler->GetAttributeEncodeState();
450 4925 : BitFlags<ReadFlags> flags;
451 4925 : flags.Set(ReadFlags::kFabricFiltered, apReadHandler->IsFabricFiltered());
452 4925 : flags.Set(ReadFlags::kAllowsLargePayload, apReadHandler->AllowsLargePayload());
453 : DataModel::ActionReturnStatus status =
454 4925 : RetrieveClusterData(mpImEngine->GetDataModelProvider(), apReadHandler->GetSubjectDescriptor(), flags,
455 : attributeReportIBs, pathForRetrieval, &encodeState);
456 4925 : if (status.IsError())
457 : {
458 : // An operation error was set; it determines whether we return early or encode a status instead,
459 : // and it is also used for error reporting below.
460 398 : err = status.GetUnderlyingError();
461 :
462 : // If the error is not an "out of writer space" error, roll back and encode a status.
463 : // Otherwise, if partial data is allowed, save the encode state.
464 : // Otherwise, roll back; if we have already encoded some chunks we are done, otherwise encode a status.
465 :
466 398 : if (encodeState.AllowPartialData() && status.IsOutOfSpaceEncodingResponse())
467 : {
468 255 : ChipLogDetail(DataManagement,
469 : "List does not fit in packet, chunk between list items for clusterId: " ChipLogFormatMEI
470 : ", attributeId: " ChipLogFormatMEI,
471 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId));
472 : // Encoding was aborted but partial data is allowed, so we don't roll back and instead save the state for the next chunk.
473 : // The expectation is that RetrieveClusterData has already reset attributeReportIBs to a good state (rolled
474 : // back any partially-written AttributeReportIB instances, reset its error status). Since AllowPartialData()
475 : // is true, we may not have encoded a complete attribute value, but we did, if we encoded anything, encode a
476 : // set of complete AttributeReportIB instances that represent part of the attribute value.
477 255 : apReadHandler->SetAttributeEncodeState(encodeState);
478 : }
479 : else
480 : {
481 : // We hit an error while writing reports; one common case is running out of buffer space. Roll back the
482 : // attributeReportIB to avoid any partial data.
483 143 : attributeReportIBs.Rollback(attributeBackup);
484 143 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
485 :
486 143 : if (!status.IsOutOfSpaceEncodingResponse())
487 : {
488 2 : ChipLogError(DataManagement,
489 : "Fail to retrieve data, roll back and encode status on clusterId: " ChipLogFormatMEI
490 : ", attributeId: " ChipLogFormatMEI "err = %" CHIP_ERROR_FORMAT,
491 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
492 : err.Format());
493 : // Try to encode our error as a status response.
494 2 : err = attributeReportIBs.EncodeAttributeStatus(pathForRetrieval, StatusIB(status.GetStatusCode()));
495 4 : if (err != CHIP_NO_ERROR)
496 : {
497 : // OK, just roll back again and give up; if we still ran out of space we
498 : // will send this status response in the next chunk.
499 0 : attributeReportIBs.Rollback(attributeBackup);
500 : }
501 : }
502 : else
503 : {
504 141 : ChipLogDetail(DataManagement,
505 : "Next attribute value does not fit in packet, roll back on clusterId: " ChipLogFormatMEI
506 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
507 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
508 : err.Format());
509 : }
510 : }
511 : }
512 4925 : SuccessOrExit(err);
513 : // Successfully encoded the attribute, clear the internal state.
514 4529 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
515 1980 : }
516 :
517 : // We just visited all paths this read handler is interested in and did not abort in the middle of iteration, so there
518 : // are no more chunks for this report.
519 1564 : hasMoreChunks = false;
520 : }
521 1980 : exit:
522 1980 : if (attributeReportIBs.GetWriter()->GetLengthWritten() != emptyReportDataLength)
523 : {
524 : // We may encounter BUFFER_TOO_SMALL with nothing actually written for the case of list chunking, so we check whether
525 : // anything was actually written.
526 1309 : attributeDataWritten = true;
527 : }
528 :
529 1980 : if (apHasEncodedData != nullptr)
530 : {
531 1980 : *apHasEncodedData = attributeDataWritten;
532 : }
533 : //
534 : // Running out of space is an error that we're expected to handle - the incompletely written DataIB has already been rolled back
535 : // earlier to ensure only whole and complete DataIBs are present in the stream.
536 : //
537 : // We can safely clear out the error so that the rest of the machinery to close out the reports, etc. will function correctly.
538 : // These are guaranteed not to fail since we've already reserved memory for the remaining 'close out' TLV operations in this
539 : // function and its callers.
540 : //
541 1980 : if (IsOutOfWriterSpaceError(err) && reservedEndOfReportIBs)
542 : {
543 416 : ChipLogDetail(DataManagement, "<RE:Run> We cannot put more chunks into this report. Enable chunking.");
544 416 : err = CHIP_NO_ERROR;
545 : }
546 :
547 : //
548 : // Only close out the report if we haven't hit an error yet so far.
549 : //
550 3960 : if (err == CHIP_NO_ERROR)
551 : {
552 1980 : TEMPORARY_RETURN_IGNORED attributeReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs);
553 :
554 1980 : err = attributeReportIBs.EndOfAttributeReportIBs();
555 :
556 : //
557 : // We reserved space for this earlier - consequently, the call to end the ReportIBs should
558 : // never fail, so assert if it does since that would be a logic bug.
559 : //
560 3960 : VerifyOrDie(err == CHIP_NO_ERROR);
561 : }
562 :
563 : //
564 : // Roll back the entire ReportIB array if we never wrote any attributes
565 : // AND never hit an error.
566 : //
567 2651 : if (!attributeDataWritten && err == CHIP_NO_ERROR)
568 : {
569 671 : aReportDataBuilder.Rollback(backup);
570 : }
571 :
572 : // hasMoreChunks with no data encoded signals that we ran into trouble when processing the attribute.
573 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
574 : // set.
575 1980 : if (apHasMoreChunks != nullptr)
576 : {
577 1980 : *apHasMoreChunks = hasMoreChunks;
578 : }
579 :
580 1980 : return err;
581 : }
582 :
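 : // For every concrete (non-wildcard) event path requested by the read handler, validates ACL and
 : // path existence, encoding an EventStatusIB into the report for each path that fails the checks.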
583 864 : CHIP_ERROR Engine::CheckAccessDeniedEventPaths(TLV::TLVWriter & aWriter, bool & aHasEncodedData, ReadHandler * apReadHandler)
584 : {
585 : using Protocols::InteractionModel::Status;
586 :
587 864 : CHIP_ERROR err = CHIP_NO_ERROR;
588 1759 : for (auto current = apReadHandler->mpEventPathList; current != nullptr;)
589 : {
590 895 : if (current->mValue.IsWildcardPath())
591 : {
592 837 : current = current->mpNext;
593 837 : continue;
594 : }
595 :
596 58 : ConcreteEventPath path(current->mValue.mEndpointId, current->mValue.mClusterId, current->mValue.mEventId);
597 :
598 58 : StatusIB statusIB;
599 :
600 58 : ReturnErrorOnFailure(
601 : CheckEventValidity(path, apReadHandler->GetSubjectDescriptor(), mpImEngine->GetDataModelProvider(), statusIB));
602 :
603 58 : if (statusIB.IsFailure())
604 : {
605 3 : TLV::TLVWriter checkpoint = aWriter;
606 3 : err = EventReportIB::ConstructEventStatusIB(aWriter, path, statusIB);
607 6 : if (err != CHIP_NO_ERROR)
608 : {
609 0 : aWriter = checkpoint;
610 0 : break;
611 : }
612 3 : aHasEncodedData = true;
613 : }
614 :
615 58 : current = current->mpNext;
616 : }
617 :
618 864 : return err;
619 : }
620 :
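 : // Encodes EventReportIBs for this read handler into the report: first any access-denied statuses
 : // for concrete event paths, then the events logged since the handler's event minimum, flagging
 : // "more chunks" when the buffer fills before all available events fit.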
621 1980 : CHIP_ERROR Engine::BuildSingleReportDataEventReports(ReportDataMessage::Builder & aReportDataBuilder, ReadHandler * apReadHandler,
622 : bool aBufferIsUsed, bool * apHasMoreChunks, bool * apHasEncodedData)
623 : {
624 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
625 1980 : size_t eventCount = 0;
626 1980 : bool hasEncodedStatus = false;
627 1980 : TLV::TLVWriter backup;
628 1980 : bool eventClean = true;
629 1980 : auto & eventMin = apReadHandler->GetEventMin();
630 1980 : bool hasMoreChunks = false;
631 :
632 1980 : aReportDataBuilder.Checkpoint(backup);
633 :
634 1980 : VerifyOrExit(apReadHandler->GetEventPathList() != nullptr, );
635 :
636 : // If the mpEventManagement is not valid or has not been initialized,
637 : // skip the rest of processing
638 891 : VerifyOrExit(mpEventManagement != nullptr && mpEventManagement->IsValid(),
639 : ChipLogError(DataManagement, "EventManagement has not yet initialized"));
640 :
641 888 : eventClean = apReadHandler->CheckEventClean(*mpEventManagement);
642 :
643 : // proceed only if there are new events.
644 888 : if (eventClean)
645 : {
646 24 : ExitNow(); // Read clean, move along
647 : }
648 :
649 : {
650 : // Just like what we do in BuildSingleReportDataAttributeReportIBs(), we need to reserve one byte for end of container tag
651 : // when encoding events to ensure we can close the container successfully.
652 864 : const uint32_t kReservedSizeEndOfReportIBs = 1;
653 864 : EventReportIBs::Builder & eventReportIBs = aReportDataBuilder.CreateEventReports();
654 864 : SuccessOrExit(err = aReportDataBuilder.GetError());
655 864 : VerifyOrExit(eventReportIBs.GetWriter() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
656 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
657 :
658 864 : err = CheckAccessDeniedEventPaths(*(eventReportIBs.GetWriter()), hasEncodedStatus, apReadHandler);
659 864 : SuccessOrExit(err);
660 :
661 864 : err = mpEventManagement->FetchEventsSince(*(eventReportIBs.GetWriter()), apReadHandler->GetEventPathList(), eventMin,
662 864 : eventCount, apReadHandler->GetSubjectDescriptor());
663 :
664 3456 : if ((err == CHIP_END_OF_TLV) || (err == CHIP_ERROR_TLV_UNDERRUN) || (err == CHIP_NO_ERROR))
665 : {
666 349 : err = CHIP_NO_ERROR;
667 349 : hasMoreChunks = false;
668 : }
669 515 : else if (IsOutOfWriterSpaceError(err))
670 : {
671 : // When the first event is too big to fit in the packet, skip that event.
672 : // However, if we already encoded some attributes before, we don't skip it, since it may still fit in a later chunk.
673 515 : if (eventCount == 0)
674 : {
675 206 : if (!aBufferIsUsed)
676 : {
677 0 : eventMin++;
678 : }
679 206 : ChipLogDetail(DataManagement, "<RE:Run> first cluster event is too big so that it fails to fit in the packet!");
680 206 : err = CHIP_NO_ERROR;
681 : }
682 : else
683 : {
684 : // `FetchEventsSince` has filled the available space
685 : // within the allowed buffer before it fit all the
686 : // available events. This is an expected condition,
687 : // so we do not propagate the error to higher levels;
688 : // instead, we terminate the event processing for now
689 309 : err = CHIP_NO_ERROR;
690 : }
691 515 : hasMoreChunks = true;
692 : }
693 : else
694 : {
695 : // All other errors are propagated to higher level.
696 : // Exiting here and returning an error will lead to
697 : // abandoning subscription.
698 0 : ExitNow();
699 : }
700 :
701 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs));
702 864 : SuccessOrExit(err = eventReportIBs.EndOfEventReports());
703 : }
704 864 : ChipLogDetail(DataManagement, "Fetched %u events", static_cast<unsigned int>(eventCount));
705 :
706 0 : exit:
707 1980 : if (apHasEncodedData != nullptr)
708 : {
709 1980 : *apHasEncodedData = hasEncodedStatus || (eventCount != 0);
710 : }
711 :
712 : // Maybe encoding the attributes has already used up all space.
713 3960 : if ((err == CHIP_NO_ERROR || IsOutOfWriterSpaceError(err)) && !(hasEncodedStatus || (eventCount != 0)))
714 : {
715 1339 : aReportDataBuilder.Rollback(backup);
716 1339 : err = CHIP_NO_ERROR;
717 : }
718 :
719 : // hasMoreChunks with no data encoded signals that we ran into trouble when processing the attribute.
720 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
721 : // set.
722 1980 : if (apHasMoreChunks != nullptr)
723 : {
724 1980 : *apHasMoreChunks = hasMoreChunks;
725 : }
726 1980 : return err;
727 : }
728 :
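 : // Builds a single ReportData message chunk for the given read handler (attributes first, then
 : // events), reserving space for the trailing MoreChunks/IM-revision/end-of-message fields up front,
 : // and sends it. The handler is closed on error or after the last chunk of a read interaction.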
729 1980 : CHIP_ERROR Engine::BuildAndSendSingleReportData(ReadHandler * apReadHandler)
730 : {
731 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
732 1980 : System::PacketBufferTLVWriter reportDataWriter;
733 1980 : ReportDataMessage::Builder reportDataBuilder;
734 1980 : System::PacketBufferHandle bufHandle = nullptr;
735 1980 : uint16_t reservedSize = 0;
736 1980 : bool hasMoreChunks = false;
737 1980 : bool needCloseReadHandler = false;
738 1980 : size_t reportBufferMaxSize = 0;
739 :
740 : // Reserved size for the MoreChunks boolean flag, which takes up 1 byte for the control tag and 1 byte for the context tag.
741 1980 : const uint32_t kReservedSizeForMoreChunksFlag = 1 + 1;
742 :
743 : // Reserved size for the uint8_t InteractionModelRevision field, which takes up 1 byte for the control tag, 1 byte for the
744 : // context tag, and 1 byte for the value.
745 1980 : const uint32_t kReservedSizeForIMRevision = 1 + 1 + 1;
746 :
747 : // Reserved size for the end of report message, which is an end-of-container (i.e 1 byte for the control tag).
748 1980 : const uint32_t kReservedSizeForEndOfReportMessage = 1;
749 :
750 : // Reserved size for an empty EventReportIBs, so we can at least check if there are any events need to be reported.
751 1980 : const uint32_t kReservedSizeForEventReportIBs = 3; // type, tag, end of container
752 :
753 1980 : VerifyOrExit(apReadHandler != nullptr, err = CHIP_ERROR_INVALID_ARGUMENT);
754 1980 : VerifyOrExit(apReadHandler->GetSession() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
755 :
756 1980 : reportBufferMaxSize = apReadHandler->GetReportBufferMaxSize();
757 :
758 1980 : bufHandle = System::PacketBufferHandle::New(reportBufferMaxSize);
759 1980 : VerifyOrExit(!bufHandle.IsNull(), err = CHIP_ERROR_NO_MEMORY);
760 :
761 1980 : if (bufHandle->AvailableDataLength() > reportBufferMaxSize)
762 : {
763 0 : reservedSize = static_cast<uint16_t>(bufHandle->AvailableDataLength() - reportBufferMaxSize);
764 : }
765 :
766 1980 : reportDataWriter.Init(std::move(bufHandle));
767 :
768 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
769 1980 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(mReservedSize));
770 : #endif
771 :
772 : // Always limit the size of the generated packet to fit within the max size returned by the ReadHandler regardless
773 : // of the available buffer capacity.
774 : // Also, we need to reserve some extra space for the MIC field.
775 1980 : SuccessOrExit(
776 : err = reportDataWriter.ReserveBuffer(static_cast<uint32_t>(reservedSize + Crypto::CHIP_CRYPTO_AEAD_MIC_LENGTH_BYTES)));
777 :
778 : // Create a report data.
779 1980 : err = reportDataBuilder.Init(&reportDataWriter);
780 1980 : SuccessOrExit(err);
781 :
782 1980 : if (apReadHandler->IsType(ReadHandler::InteractionType::Subscribe))
783 : {
784 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
785 : // Notify the ICDManager that we are about to send a subscription report before we prepare the Report payload.
786 : // This allows the ICDManager to trigger any necessary updates and have the information in the report about to be sent.
787 : app::ICDNotifier::GetInstance().NotifySubscriptionReport();
788 : #endif // CHIP_CONFIG_ENABLE_ICD_SERVER
789 :
790 435 : SubscriptionId subscriptionId = 0;
791 435 : apReadHandler->GetSubscriptionId(subscriptionId);
792 435 : reportDataBuilder.SubscriptionId(subscriptionId);
793 : }
794 :
795 1980 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
796 : kReservedSizeForEndOfReportMessage + kReservedSizeForEventReportIBs));
797 :
798 : {
799 1980 : bool hasMoreChunksForAttributes = false;
800 1980 : bool hasMoreChunksForEvents = false;
801 1980 : bool hasEncodedAttributes = false;
802 1980 : bool hasEncodedEvents = false;
803 :
804 1980 : err = BuildSingleReportDataAttributeReportIBs(reportDataBuilder, apReadHandler, &hasMoreChunksForAttributes,
805 : &hasEncodedAttributes);
806 2011 : SuccessOrExit(err);
807 1980 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForEventReportIBs));
808 1980 : err = BuildSingleReportDataEventReports(reportDataBuilder, apReadHandler, hasEncodedAttributes, &hasMoreChunksForEvents,
809 : &hasEncodedEvents);
810 1980 : SuccessOrExit(err);
811 :
812 1980 : hasMoreChunks = hasMoreChunksForAttributes || hasMoreChunksForEvents;
813 :
814 1980 : if (!hasEncodedAttributes && !hasEncodedEvents && hasMoreChunks)
815 : {
816 31 : ChipLogError(DataManagement,
817 : "No data actually encoded but hasMoreChunks flag is set, close read handler! (attribute too big?)");
818 31 : err = apReadHandler->SendStatusReport(Protocols::InteractionModel::Status::ResourceExhausted);
819 62 : if (err == CHIP_NO_ERROR)
820 : {
821 31 : needCloseReadHandler = true;
822 : }
823 31 : ExitNow();
824 : }
825 : }
826 :
827 1949 : SuccessOrExit(err = reportDataBuilder.GetError());
828 1949 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
829 : kReservedSizeForEndOfReportMessage));
830 1949 : if (hasMoreChunks)
831 : {
832 866 : reportDataBuilder.MoreChunkedMessages(true);
833 : }
834 1083 : else if (apReadHandler->IsType(ReadHandler::InteractionType::Read))
835 : {
836 706 : reportDataBuilder.SuppressResponse(true);
837 : }
838 :
839 : //
840 : // Since we've already reserved space for both the MoreChunked/SuppressResponse flags, as well as
841 : // the end-of-container flag for the end of the report, we should never hit an error closing out the message.
842 : //
843 1949 : SuccessOrDie(reportDataBuilder.EndOfReportDataMessage());
844 :
845 1949 : err = reportDataWriter.Finalize(&bufHandle);
846 1949 : SuccessOrExit(err);
847 :
848 1949 : ChipLogDetail(DataManagement, "<RE> Sending report (payload has %" PRIu32 " bytes)...", reportDataWriter.GetLengthWritten());
849 1949 : err = SendReport(apReadHandler, std::move(bufHandle), hasMoreChunks);
850 1949 : SuccessOrExitAction(
851 : err, ChipLogError(DataManagement, "<RE> Error sending out report data with %" CHIP_ERROR_FORMAT "!", err.Format()));
852 :
853 1945 : ChipLogDetail(DataManagement, "<RE> ReportsInFlight = %" PRIu32 " with readHandler %" PRIu32 ", RE has %s", mNumReportsInFlight,
854 : mCurReadHandlerIdx, hasMoreChunks ? "more messages" : "no more messages");
855 :
856 0 : exit:
857 3960 : if (err != CHIP_NO_ERROR || (apReadHandler->IsType(ReadHandler::InteractionType::Read) && !hasMoreChunks) ||
858 : needCloseReadHandler)
859 : {
860 : //
861 : // In the case of successful report generation and we're on the last chunk of a read, we don't expect
862 : // any further activity on this exchange. The EC layer will automatically close our EC, so shutdown the ReadHandler
863 : // gracefully.
864 : //
865 739 : apReadHandler->Close();
866 : }
867 :
868 3960 : return err;
869 1980 : }
870 :
871 1750 : void Engine::Run(System::Layer * aSystemLayer, void * apAppState)
872 : {
873 1750 : Engine * const pEngine = reinterpret_cast<Engine *>(apAppState);
874 1750 : pEngine->mRunScheduled = false;
875 1750 : pEngine->Run();
876 1750 : }
877 :
878 2146 : CHIP_ERROR Engine::ScheduleRun()
879 : {
880 2146 : if (IsRunScheduled())
881 : {
882 396 : return CHIP_NO_ERROR;
883 : }
884 :
885 1750 : Messaging::ExchangeManager * exchangeManager = mpImEngine->GetExchangeManager();
886 1750 : if (exchangeManager == nullptr)
887 : {
888 0 : return CHIP_ERROR_INCORRECT_STATE;
889 : }
890 1750 : SessionManager * sessionManager = exchangeManager->GetSessionManager();
891 1750 : if (sessionManager == nullptr)
892 : {
893 0 : return CHIP_ERROR_INCORRECT_STATE;
894 : }
895 1750 : System::Layer * systemLayer = sessionManager->SystemLayer();
896 1750 : if (systemLayer == nullptr)
897 : {
898 0 : return CHIP_ERROR_INCORRECT_STATE;
899 : }
900 3500 : ReturnErrorOnFailure(systemLayer->ScheduleWork(Run, this));
901 1750 : mRunScheduled = true;
902 1750 : return CHIP_NO_ERROR;
903 : }
904 :
905 2097 : void Engine::Run()
906 : {
907 2097 : uint32_t numReadHandled = 0;
908 :
909 : // We may be deallocating read handlers as we go. Track how many we had
910 : // initially, so we make sure to go through all of them.
911 2097 : size_t initialAllocated = mpImEngine->mReadHandlers.Allocated();
912 4308 : while ((mNumReportsInFlight < CHIP_IM_MAX_REPORTS_IN_FLIGHT) && (numReadHandled < initialAllocated))
913 : {
914 : ReadHandler * readHandler =
915 2215 : mpImEngine->ActiveHandlerAt(mCurReadHandlerIdx % (uint32_t) mpImEngine->mReadHandlers.Allocated());
916 2215 : VerifyOrDie(readHandler != nullptr);
917 :
918 2215 : if (readHandler->ShouldReportUnscheduled() || mpImEngine->GetReportScheduler()->IsReportableNow(readHandler))
919 : {
920 :
921 1979 : mRunningReadHandler = readHandler;
922 1979 : CHIP_ERROR err = BuildAndSendSingleReportData(readHandler);
923 1979 : mRunningReadHandler = nullptr;
924 3958 : if (err != CHIP_NO_ERROR)
925 : {
926 4 : return;
927 : }
928 : }
929 :
930 2211 : numReadHandled++;
931 : // If readHandler removed itself from our list, we also decremented
932 : // mCurReadHandlerIdx to account for that removal, so it's safe to
933 : // increment here.
934 2211 : mCurReadHandlerIdx++;
935 : }
936 :
937 : //
938 : // If our tracker has exceeded the bounds of the handler list, reset it back to 0.
939 : // This isn't strictly necessary, but does make it easier to debug issues in this code if they
940 : // do arise.
941 : //
942 2093 : if (mCurReadHandlerIdx >= mpImEngine->mReadHandlers.Allocated())
943 : {
944 2036 : mCurReadHandlerIdx = 0;
945 : }
946 :
947 2093 : bool allReadClean = true;
948 :
949 2093 : mpImEngine->mReadHandlers.ForEachActiveObject([&allReadClean](ReadHandler * handler) {
950 2865 : if (handler->IsDirty())
951 : {
952 868 : allReadClean = false;
953 868 : return Loop::Break;
954 : }
955 :
956 1997 : return Loop::Continue;
957 : });
958 :
959 2093 : if (allReadClean)
960 : {
961 1225 : ChipLogDetail(DataManagement, "All ReadHandler-s are clean, clear GlobalDirtySet");
962 :
963 1225 : mGlobalDirtySet.ReleaseAll();
964 : }
965 : }
966 :
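 : // Tries to merge the given path into an existing entry of the global dirty set: if an existing
 : // entry already covers the path, only that entry's generation is refreshed; if the new path covers
 : // an existing entry, that entry is widened to the new path. Returns true if a merge happened.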
967 276 : bool Engine::MergeOverlappedAttributePath(const AttributePathParams & aAttributePath)
968 : {
969 276 : return Loop::Break == mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
970 214 : if (path->IsAttributePathSupersetOf(aAttributePath))
971 : {
972 112 : path->mGeneration = GetDirtySetGeneration();
973 112 : return Loop::Break;
974 : }
975 102 : if (aAttributePath.IsAttributePathSupersetOf(*path))
976 : {
977 : // TODO: the wildcard input path may be a superset of later paths in globalDirtySet. That is fine for now, since
978 : // when building a report we use the first path of globalDirtySet to compare against the paths read clients are
979 : // interested in.
980 : // It would be better to eliminate the duplicate wildcard paths in a follow-up.
981 2 : path->mGeneration = GetDirtySetGeneration();
982 2 : path->mEndpointId = aAttributePath.mEndpointId;
983 2 : path->mClusterId = aAttributePath.mClusterId;
984 2 : path->mListIndex = aAttributePath.mListIndex;
985 2 : path->mAttributeId = aAttributePath.mAttributeId;
986 2 : return Loop::Break;
987 : }
988 100 : return Loop::Continue;
989 276 : });
990 : }
991 :
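 : // Releases all dirty-set entries whose generation was cleared to zero ("tombs") by the merge
 : // helpers below. Returns true if at least one entry was released.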
992 8 : bool Engine::ClearTombPaths()
993 : {
994 8 : bool pathReleased = false;
995 8 : mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
996 64 : if (path->mGeneration.IsZero())
997 : {
998 28 : mGlobalDirtySet.ReleaseObject(path);
999 28 : pathReleased = true;
1000 : }
1001 64 : return Loop::Continue;
1002 : });
1003 8 : return pathReleased;
1004 : }
1005 :
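 : // Coalesces dirty paths that share the same endpoint and cluster into a single wildcard-attribute
 : // entry (keeping the newest generation) to free space in the dirty-set pool.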
1006 5 : bool Engine::MergeDirtyPathsUnderSameCluster()
1007 : {
1008 5 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1009 40 : if (outerPath->HasWildcardClusterId() || outerPath->mGeneration.IsZero())
1010 : {
1011 14 : return Loop::Continue;
1012 : }
1013 26 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1014 208 : if (innerPath == outerPath)
1015 : {
1016 26 : return Loop::Continue;
1017 : }
1018 : // We don't support paths with a wildcard endpoint + a concrete cluster in global dirty set, so we do a simple == check
1019 : // here.
1020 182 : if (innerPath->mEndpointId != outerPath->mEndpointId || innerPath->mClusterId != outerPath->mClusterId)
1021 : {
1022 168 : return Loop::Continue;
1023 : }
1024 14 : if (innerPath->mGeneration.After(outerPath->mGeneration))
1025 : {
1026 0 : outerPath->mGeneration = innerPath->mGeneration;
1027 : }
1028 14 : outerPath->SetWildcardAttributeId();
1029 :
1030 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1031 : // generation to 0 and then clear it later.
1032 14 : innerPath->mGeneration.Clear();
1033 14 : return Loop::Continue;
1034 : });
1035 26 : return Loop::Continue;
1036 : });
1037 :
1038 5 : return ClearTombPaths();
1039 : }
1040 :
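 : // Coalesces dirty paths that share the same endpoint into a single wildcard-cluster, wildcard-attribute
 : // entry (keeping the newest generation) to free space in the dirty-set pool.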
1041 3 : bool Engine::MergeDirtyPathsUnderSameEndpoint()
1042 : {
1043 3 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1044 24 : if (outerPath->HasWildcardEndpointId() || outerPath->mGeneration.IsZero())
1045 : {
1046 14 : return Loop::Continue;
1047 : }
1048 10 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1049 80 : if (innerPath == outerPath)
1050 : {
1051 10 : return Loop::Continue;
1052 : }
1053 70 : if (innerPath->mEndpointId != outerPath->mEndpointId)
1054 : {
1055 56 : return Loop::Continue;
1056 : }
1057 14 : if (innerPath->mGeneration.After(outerPath->mGeneration))
1058 : {
1059 0 : outerPath->mGeneration = innerPath->mGeneration;
1060 : }
1061 14 : outerPath->SetWildcardClusterId();
1062 14 : outerPath->SetWildcardAttributeId();
1063 :
1064 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1065 : // generation to 0 and then clear it later.
1066 14 : innerPath->mGeneration.Clear();
1067 14 : return Loop::Continue;
1068 : });
1069 10 : return Loop::Continue;
1070 : });
1071 3 : return ClearTombPaths();
1072 : }
1073 :
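 : // Records the given path in the global dirty set: first tries to merge it into existing entries;
 : // if the pool is exhausted, progressively coalesces entries per cluster, then per endpoint, and
 : // finally into a single wildcard entry before allocating a new entry for the path.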
1074 189 : CHIP_ERROR Engine::InsertPathIntoDirtySet(const AttributePathParams & aAttributePath)
1075 : {
1076 189 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1077 :
1078 82 : if (mGlobalDirtySet.Exhausted() && !MergeDirtyPathsUnderSameCluster() && !MergeDirtyPathsUnderSameEndpoint())
1079 : {
1080 1 : ChipLogDetail(DataManagement, "Global dirty set pool exhausted, merge all paths.");
1081 1 : mGlobalDirtySet.ReleaseAll();
1082 1 : auto object = mGlobalDirtySet.CreateObject();
1083 1 : object->mGeneration = GetDirtySetGeneration();
1084 : }
1085 :
1086 82 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1087 79 : ChipLogDetail(DataManagement, "Cannot merge the new path into any existing path, create one.");
1088 :
1089 79 : auto object = mGlobalDirtySet.CreateObject();
1090 79 : if (object == nullptr)
1091 : {
1092 : // This should not happen, this path should be merged into the wildcard endpoint at least.
1093 0 : ChipLogError(DataManagement, "mGlobalDirtySet pool full, cannot handle more entries!");
1094 0 : return CHIP_ERROR_NO_MEMORY;
1095 : }
1096 79 : *object = aAttributePath;
1097 79 : object->mGeneration = GetDirtySetGeneration();
1098 :
1099 79 : return CHIP_NO_ERROR;
1100 : }
1101 :
1102 5412 : CHIP_ERROR Engine::SetDirty(const AttributePathParams & aAttributePath)
1103 : {
1104 5412 : BumpDirtySetGeneration();
1105 :
1106 5412 : bool intersectsInterestPath = false;
1107 5412 : DataModel::Provider * dataModel = mpImEngine->GetDataModelProvider();
1108 5412 : mpImEngine->mReadHandlers.ForEachActiveObject([&dataModel, &aAttributePath, &intersectsInterestPath](ReadHandler * handler) {
1109 : // We call AttributePathIsDirty for both read interactions and subscribe interactions, since we may send inconsistent
1110 : // attribute data between two chunks. AttributePathIsDirty will not schedule a new run for read handlers which are
1111 : // waiting for a response to the last message chunk for read interactions.
1112 477 : if (handler->CanStartReporting() || handler->IsAwaitingReportResponse())
1113 : {
1114 934 : for (auto object = handler->GetAttributePathList(); object != nullptr; object = object->mpNext)
1115 : {
1116 802 : if (object->mValue.Intersects(aAttributePath))
1117 : {
1118 345 : handler->AttributePathIsDirty(dataModel, aAttributePath);
1119 345 : intersectsInterestPath = true;
1120 345 : break;
1121 : }
1122 : }
1123 : }
1124 :
1125 477 : return Loop::Continue;
1126 : });
1127 :
1128 5412 : if (!intersectsInterestPath)
1129 : {
1130 5228 : return CHIP_NO_ERROR;
1131 : }
1132 184 : ReturnErrorOnFailure(InsertPathIntoDirtySet(aAttributePath));
1133 :
1134 184 : return CHIP_NO_ERROR;
1135 : }
1136 :
1137 1949 : CHIP_ERROR Engine::SendReport(ReadHandler * apReadHandler, System::PacketBufferHandle && aPayload, bool aHasMoreChunks)
1138 : {
1139 1949 : CHIP_ERROR err = CHIP_NO_ERROR;
1140 :
1141 : // We can only have 1 report in flight for any given read - increment the in-flight count here.
1142 1949 : mNumReportsInFlight++;
1143 1949 : err = apReadHandler->SendReportData(std::move(aPayload), aHasMoreChunks);
1144 3898 : if (err != CHIP_NO_ERROR)
1145 : {
1146 4 : --mNumReportsInFlight;
1147 : }
1148 1949 : return err;
1149 : }
1150 :
1151 1945 : void Engine::OnReportConfirm()
1152 : {
1153 1945 : VerifyOrDie(mNumReportsInFlight > 0);
1154 :
1155 1945 : if (mNumReportsInFlight == CHIP_IM_MAX_REPORTS_IN_FLIGHT)
1156 : {
1157 : // We could have other things waiting to go now that this report is no
1158 : // longer in flight.
1159 61 : TEMPORARY_RETURN_IGNORED ScheduleRun();
1160 : }
1161 1945 : mNumReportsInFlight--;
1162 1945 : ChipLogDetail(DataManagement, "<RE> OnReportConfirm: NumReports = %" PRIu32, mNumReportsInFlight);
1163 1945 : }
1164 :
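 : // Computes the smallest "last written events" byte position across all subscription handlers,
 : // i.e. the position of the subscriber that is furthest behind in the event log.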
1165 20 : void Engine::GetMinEventLogPosition(uint32_t & aMinLogPosition)
1166 : {
1167 20 : mpImEngine->mReadHandlers.ForEachActiveObject([&aMinLogPosition](ReadHandler * handler) {
1168 20 : if (handler->IsType(ReadHandler::InteractionType::Read))
1169 : {
1170 0 : return Loop::Continue;
1171 : }
1172 :
1173 20 : uint32_t initialWrittenEventsBytes = handler->GetLastWrittenEventsBytes();
1174 20 : if (initialWrittenEventsBytes < aMinLogPosition)
1175 : {
1176 20 : aMinLogPosition = initialWrittenEventsBytes;
1177 : }
1178 :
1179 20 : return Loop::Continue;
1180 : });
1181 20 : }
1182 :
1183 20 : CHIP_ERROR Engine::ScheduleBufferPressureEventDelivery(uint32_t aBytesWritten)
1184 : {
1185 20 : uint32_t minEventLogPosition = aBytesWritten;
1186 20 : GetMinEventLogPosition(minEventLogPosition);
1187 20 : if (aBytesWritten - minEventLogPosition > CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD)
1188 : {
1189 0 : ChipLogDetail(DataManagement, "<RE> Buffer overfilled CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD %d, schedule engine run",
1190 : CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD);
1191 0 : return ScheduleRun();
1192 : }
1193 20 : return CHIP_NO_ERROR;
1194 : }
1195 :
1196 665 : CHIP_ERROR Engine::NewEventGenerated(ConcreteEventPath & aPath, uint32_t aBytesConsumed)
1197 : {
1198 : // If we have no read handlers right now that care about any events,
1199 : // we don't need to schedule a run for this event.
1200 : // Even if a run were scheduled, we would not deliver any events anyway,
1201 : // so this just saves one schedule run.
1202 665 : if (mpImEngine->mEventPathPool.Allocated() == 0)
1203 : {
1204 633 : return CHIP_NO_ERROR;
1205 : }
1206 :
1207 32 : bool isUrgentEvent = false;
1208 32 : mpImEngine->mReadHandlers.ForEachActiveObject([&aPath, &isUrgentEvent](ReadHandler * handler) {
1209 40 : if (handler->IsType(ReadHandler::InteractionType::Read))
1210 : {
1211 0 : return Loop::Continue;
1212 : }
1213 :
1214 104 : for (auto * interestedPath = handler->GetEventPathList(); interestedPath != nullptr;
1215 64 : interestedPath = interestedPath->mpNext)
1216 : {
1217 76 : if (interestedPath->mValue.IsEventPathSupersetOf(aPath) && interestedPath->mValue.mIsUrgentEvent)
1218 : {
1219 12 : isUrgentEvent = true;
1220 12 : handler->ForceDirtyState();
1221 12 : break;
1222 : }
1223 : }
1224 :
1225 40 : return Loop::Continue;
1226 : });
1227 :
1228 32 : if (isUrgentEvent)
1229 : {
1230 12 : ChipLogDetail(DataManagement, "Urgent event will be sent once reporting is not blocked by the min interval");
1231 12 : return CHIP_NO_ERROR;
1232 : }
1233 :
1234 20 : return ScheduleBufferPressureEventDelivery(aBytesConsumed);
1235 : }
1236 :
1237 347 : void Engine::ScheduleUrgentEventDeliverySync(Optional<FabricIndex> fabricIndex)
1238 : {
1239 347 : mpImEngine->mReadHandlers.ForEachActiveObject([fabricIndex](ReadHandler * handler) {
1240 0 : if (handler->IsType(ReadHandler::InteractionType::Read))
1241 : {
1242 0 : return Loop::Continue;
1243 : }
1244 :
1245 0 : if (fabricIndex.HasValue() && fabricIndex.Value() != handler->GetAccessingFabricIndex())
1246 : {
1247 0 : return Loop::Continue;
1248 : }
1249 :
1250 0 : handler->ForceDirtyState();
1251 :
1252 0 : return Loop::Continue;
1253 : });
1254 :
1255 347 : Run();
1256 347 : }
1257 :
1258 5232 : void Engine::MarkDirty(const AttributePathParams & path)
1259 : {
1260 5232 : CHIP_ERROR err = SetDirty(path);
1261 10464 : if (err != CHIP_NO_ERROR)
1262 : {
1263 0 : ChipLogError(DataManagement, "Failed to set path dirty: %" CHIP_ERROR_FORMAT, err.Format());
1264 : }
1265 5232 : }
1266 :
1267 : } // namespace reporting
1268 : } // namespace app
1269 : } // namespace chip
|