Line data Source code
1 : /*
2 : *
3 : * Copyright (c) 2021 Project CHIP Authors
4 : * All rights reserved.
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include <access/AccessRestrictionProvider.h>
20 : #include <access/Privilege.h>
21 : #include <app/AppConfig.h>
22 : #include <app/AttributePathExpandIterator.h>
23 : #include <app/ConcreteEventPath.h>
24 : #include <app/GlobalAttributes.h>
25 : #include <app/InteractionModelEngine.h>
26 : #include <app/MessageDef/StatusIB.h>
27 : #include <app/data-model-provider/ActionReturnStatus.h>
28 : #include <app/data-model-provider/MetadataLookup.h>
29 : #include <app/data-model-provider/MetadataTypes.h>
30 : #include <app/data-model-provider/Provider.h>
31 : #include <app/icd/server/ICDServerConfig.h>
32 : #include <app/reporting/Engine.h>
33 : #include <app/reporting/reporting.h>
34 : #include <app/util/MatterCallbacks.h>
35 : #include <lib/core/CHIPError.h>
36 : #include <lib/core/DataModelTypes.h>
37 : #include <lib/support/CodeUtils.h>
38 : #include <protocols/interaction_model/StatusCode.h>
39 :
40 : #include <optional>
41 :
42 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
43 : #include <app/icd/server/ICDNotifier.h> // nogncheck
44 : #endif
45 :
46 : using namespace chip::Access;
47 :
48 : namespace chip {
49 : namespace app {
50 : namespace reporting {
51 : namespace {
52 :
53 : using Protocols::InteractionModel::Status;
54 :
55 : /// Returns the status of ACL validation.
56 : /// If the return value has a status set, that means the ACL check failed,
57 : /// the read must not be performed, and the returned status (which may
58 : /// be success, when dealing with non-concrete paths) should be used
59 : /// as the status for the read.
60 : ///
61 : /// If the returned value is std::nullopt, that means the ACL check passed and the
62 : /// read should proceed.
63 4424 : std::optional<CHIP_ERROR> ValidateReadAttributeACL(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
64 : const ConcreteReadAttributePath & path)
65 : {
66 :
67 4424 : RequestPath requestPath{ .cluster = path.mClusterId,
68 4424 : .endpoint = path.mEndpointId,
69 : .requestType = RequestType::kAttributeReadRequest,
70 4424 : .entityId = path.mAttributeId };
71 :
72 4424 : DataModel::AttributeFinder finder(dataModel);
73 :
74 4424 : std::optional<DataModel::AttributeEntry> info = finder.Find(path);
75 :
76 : // If the attribute exists, we know whether it is readable (readPrivilege has value)
77 : // and what the required access privilege is. However, for attributes missing from the metadata
78 : // (e.g. global attributes) or completely missing attributes, we do not actually know the required
79 : // privilege and default to kView (this is correct for global attributes and a reasonable check
80 : // for others).
81 4424 : Privilege requiredPrivilege = Privilege::kView;
82 4424 : if (info.has_value())
83 : {
84 : // if the attribute exists and is readable, use its read privilege; otherwise, keep the default value
85 4310 : requiredPrivilege = info->GetReadPrivilege().value_or(requiredPrivilege);
86 : }
87 :
88 4424 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, requiredPrivilege);
89 4424 : if (err == CHIP_NO_ERROR)
90 : {
91 4424 : if (IsSupportedGlobalAttributeNotInMetadata(path.mAttributeId))
92 : {
93 : // Global attributes passing a kView check is ok
94 1190 : return std::nullopt;
95 : }
96 :
97 : // We want to return "success" (i.e. nullopt) IF AND ONLY IF the attribute exists and is readable (has read privilege).
98 : // Since the Access control check above may have passed with kView, we do another check here:
99 : // - Attribute exists (info has value)
100 : // - Attribute is readable (readPrivilege has value) and not "write only"
101 : // If the attribute exists and is not readable, we will return UnsupportedRead (spec 8.4.3.2: "Else if the path indicates
102 : // attribute data that is not readable, an AttributeStatusIB SHALL be generated with the UNSUPPORTED_READ Status Code.")
103 : //
104 : // TODO: https://github.com/CHIP-Specifications/connectedhomeip-spec/pull/9024 requires interleaved ordering that
105 : // is NOT implemented here. Spec requires:
106 : // - check cluster access check (done here as kView at least)
107 : // - unsupported endpoint/cluster/attribute check (NOT done here) when the attribute is missing.
108 : // This SHOULD be done here when info does not have a value. This was not done in the first pass to
109 : // minimize the amount of delta in the initial PR.
110 : // - "write-only" attributes should return UNSUPPORTED_READ (this is done here)
111 3234 : if (info.has_value() && !info->GetReadPrivilege().has_value())
112 : {
113 0 : return CHIP_IM_GLOBAL_STATUS(UnsupportedRead);
114 : }
115 :
116 3234 : return std::nullopt;
117 : }
118 0 : VerifyOrReturnError((err == CHIP_ERROR_ACCESS_DENIED) || (err == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL), err);
119 :
120 : // Implementation of 8.4.3.2 of the spec for path expansion
121 0 : if (path.mExpanded)
122 : {
123 0 : return CHIP_NO_ERROR;
124 : }
125 :
126 : // access denied and access restricted have specific codes for IM
127 0 : return err == CHIP_ERROR_ACCESS_DENIED ? CHIP_IM_GLOBAL_STATUS(UnsupportedAccess) : CHIP_IM_GLOBAL_STATUS(AccessRestricted);
128 4424 : }
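// Illustrative sketch (not part of the original file): how a caller such as RetrieveClusterData()
// below consumes the tri-state result of ValidateReadAttributeACL(). Only symbols already used in
// this file are assumed.
//
//   std::optional<CHIP_ERROR> aclStatus = ValidateReadAttributeACL(dataModel, subjectDescriptor, path);
//   if (!aclStatus.has_value())
//   {
//       // ACL check passed: proceed with the actual read.
//   }
//   else if (*aclStatus == CHIP_NO_ERROR)
//   {
//       // Expanded (wildcard) path with denied access: skipped silently and treated as success.
//   }
//   else
//   {
//       // Concrete path: *aclStatus (UnsupportedAccess / AccessRestricted / UnsupportedRead)
//       // becomes the status encoded for this attribute.
//   }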
129 :
130 4424 : DataModel::ActionReturnStatus RetrieveClusterData(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
131 : bool isFabricFiltered, AttributeReportIBs::Builder & reportBuilder,
132 : const ConcreteReadAttributePath & path, AttributeEncodeState * encoderState)
133 : {
134 4424 : ChipLogDetail(DataManagement, "<RE:Run> Cluster %" PRIx32 ", Attribute %" PRIx32 " is dirty", path.mClusterId,
135 : path.mAttributeId);
136 4424 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
137 : DataModelCallbacks::OperationOrder::Pre, path);
138 :
139 4424 : DataModel::ReadAttributeRequest readRequest;
140 :
141 4424 : readRequest.readFlags.Set(DataModel::ReadFlags::kFabricFiltered, isFabricFiltered);
142 4424 : readRequest.subjectDescriptor = &subjectDescriptor;
143 4424 : readRequest.path = path;
144 :
145 4424 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
146 :
147 4424 : DataVersion version = 0;
148 4424 : if (auto clusterInfo = serverClusterFinder.Find(path); clusterInfo.has_value())
149 : {
150 4328 : version = clusterInfo->dataVersion;
151 : }
152 : else
153 : {
154 96 : ChipLogError(DataManagement, "Read request on unknown cluster - no data version available");
155 : }
156 :
157 4424 : TLV::TLVWriter checkpoint;
158 4424 : reportBuilder.Checkpoint(checkpoint);
159 :
160 4424 : DataModel::ActionReturnStatus status(CHIP_NO_ERROR);
161 4424 : AttributeValueEncoder attributeValueEncoder(reportBuilder, subjectDescriptor, path, version, isFabricFiltered, encoderState);
162 :
163 : // TODO: we explicitly DO NOT validate that the path is a valid cluster path (moreover, serverClusterFinder above
164 : // explicitly ignores that case). This means that both global attribute reads and ReadAttribute
165 : // can be passed invalid paths when an invalid Read is detected, and they must handle them.
166 : //
167 : // See https://github.com/project-chip/connectedhomeip/issues/37410
168 :
169 4424 : if (auto access_status = ValidateReadAttributeACL(dataModel, subjectDescriptor, path); access_status.has_value())
170 : {
171 0 : status = *access_status;
172 : }
173 4424 : else if (IsSupportedGlobalAttributeNotInMetadata(readRequest.path.mAttributeId))
174 : {
175 : // Global attributes are NOT directly handled by data model providers; instead,
176 : // they are routed through metadata.
177 1190 : status = ReadGlobalAttributeFromMetadata(dataModel, readRequest.path, attributeValueEncoder);
178 : }
179 : else
180 : {
181 3234 : status = dataModel->ReadAttribute(readRequest, attributeValueEncoder);
182 : }
183 :
184 4424 : if (status.IsSuccess())
185 : {
186 : // TODO: this callback being only executed on success is awkward. The Write callback is always done
187 : // for both read and write.
188 : //
189 : // For now this preserves existing/previous code logic; however, we should consider ALWAYS
190 : // calling this.
191 4029 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
192 : DataModelCallbacks::OperationOrder::Post, path);
193 4029 : return status;
194 : }
195 :
196 : // Encoder state is relevant for errors in case they are retryable.
197 : //
198 : // Generally only out of space encoding errors would be retryable, however we save the state
199 : // for all errors in case this is information that is useful (retry or error position).
200 395 : if (encoderState != nullptr)
201 : {
202 395 : *encoderState = attributeValueEncoder.GetState();
203 : }
204 :
205 : #if CHIP_CONFIG_DATA_MODEL_EXTRA_LOGGING
206 : // Out-of-space errors may just be chunked data; reporting those cases would be very confusing
207 : // as they are not really errors. Report only the others (which presumably are not recoverable
208 : // and will be sent to the client as well).
209 395 : if (!status.IsOutOfSpaceEncodingResponse())
210 : {
211 0 : DataModel::ActionReturnStatus::StringStorage storage;
212 0 : ChipLogError(DataManagement, "Failed to read attribute: %s", status.c_str(storage));
213 : }
214 : #endif
215 395 : return status;
216 4424 : }
217 :
218 109 : bool IsClusterDataVersionEqualTo(DataModel::Provider * dataModel, const ConcreteClusterPath & path, DataVersion dataVersion)
219 : {
220 109 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
221 109 : auto info = serverClusterFinder.Find(path);
222 :
223 109 : return info.has_value() && (info->dataVersion == dataVersion);
224 109 : }
225 :
226 : /// Check if the given `err` is a known ACL error that can be translated into
227 : /// a StatusIB (UnsupportedAccess/AccessRestricted)
228 : ///
229 : /// Returns true if the error could be translated and places the result into `outStatus`.
230 : /// `path` is used for logging.
231 110 : bool IsTranslatableAclError(const ConcreteEventPath & path, const CHIP_ERROR & err, StatusIB & outStatus)
232 : {
233 110 : if ((err != CHIP_ERROR_ACCESS_DENIED) && (err != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL))
234 : {
235 108 : return false;
236 : }
237 :
238 2 : ChipLogDetail(InteractionModel, "Access to event (%u, " ChipLogFormatMEI ", " ChipLogFormatMEI ") denied by %s",
239 : path.mEndpointId, ChipLogValueMEI(path.mClusterId), ChipLogValueMEI(path.mEventId),
240 : err == CHIP_ERROR_ACCESS_DENIED ? "ACL" : "ARL");
241 :
242 2 : outStatus = err == CHIP_ERROR_ACCESS_DENIED ? StatusIB(Status::UnsupportedAccess) : StatusIB(Status::AccessRestricted);
243 2 : return true;
244 : }
245 :
246 56 : CHIP_ERROR CheckEventValidity(const ConcreteEventPath & path, const SubjectDescriptor & subjectDescriptor,
247 : DataModel::Provider * provider, StatusIB & outStatus)
248 : {
249 : // We validate ACL before Path; however, this means we do not want the real ACL check
250 : // to be blocked by an `Invalid endpoint id` error when checking event info.
251 : // As a result, we check for VIEW privilege on the cluster first (most permissive)
252 : // and will do a 2nd check for the actual required privilege as a follow-up.
253 56 : RequestPath requestPath{
254 56 : .cluster = path.mClusterId,
255 56 : .endpoint = path.mEndpointId,
256 : .requestType = RequestType::kEventReadRequest,
257 56 : };
258 56 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Access::Privilege::kView);
259 56 : if (IsTranslatableAclError(path, err, outStatus))
260 : {
261 2 : return CHIP_NO_ERROR;
262 : }
263 54 : ReturnErrorOnFailure(err);
264 :
265 : DataModel::EventEntry eventInfo;
266 54 : err = provider->EventInfo(path, eventInfo);
267 54 : if (err != CHIP_NO_ERROR)
268 : {
269 : // Cannot get event data to validate; the event is not supported.
270 : // We still fall through into "ValidateClusterPath" to try to return a better code
271 : // (i.e. say invalid endpoint or cluster); however, if the path seems OK we will
272 : // return UnsupportedEvent since we failed to get event metadata.
273 0 : outStatus = StatusIB(Status::UnsupportedEvent);
274 : }
275 : else
276 : {
277 : // set up the status as "OK" as long as validation below works
278 54 : outStatus = StatusIB(Status::Success);
279 :
280 54 : requestPath.entityId = path.mEventId;
281 :
282 54 : err = GetAccessControl().Check(subjectDescriptor, requestPath, eventInfo.readPrivilege);
283 54 : if (IsTranslatableAclError(path, err, outStatus))
284 : {
285 0 : return CHIP_NO_ERROR;
286 : }
287 54 : ReturnErrorOnFailure(err);
288 : }
289 :
290 54 : Status status = DataModel::ValidateClusterPath(provider, path, Status::Success);
291 54 : if (status != Status::Success)
292 : {
293 : // a valid status available: failure
294 0 : outStatus = StatusIB(status);
295 0 : return CHIP_NO_ERROR;
296 : }
297 :
298 : // Status set above: could be success, but also UnsupportedEvent
299 54 : return CHIP_NO_ERROR;
300 : }
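// Illustrative sketch (not part of the original file): the two failure channels of
// CheckEventValidity(), mirroring how CheckAccessDeniedEventPaths() below consumes them.
//
//   StatusIB statusIB;
//   ReturnErrorOnFailure(CheckEventValidity(path, subjectDescriptor, provider, statusIB));
//   if (statusIB.IsFailure())
//   {
//       // Per-path failure (UnsupportedAccess / AccessRestricted / UnsupportedEvent / invalid path):
//       // encode an EventStatusIB for this path and keep processing the remaining paths.
//   }
//   // A returned error (not a translatable ACL error) aborts report generation instead.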
301 :
302 : } // namespace
303 :
304 74 : Engine::Engine(InteractionModelEngine * apImEngine) : mpImEngine(apImEngine) {}
305 :
306 417 : CHIP_ERROR Engine::Init(EventManagement * apEventManagement)
307 : {
308 417 : VerifyOrReturnError(apEventManagement != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
309 417 : mNumReportsInFlight = 0;
310 417 : mCurReadHandlerIdx = 0;
311 417 : mpEventManagement = apEventManagement;
312 :
313 417 : return CHIP_NO_ERROR;
314 : }
315 :
316 296 : void Engine::Shutdown()
317 : {
318 : // Flush out the event buffer synchronously
319 296 : ScheduleUrgentEventDeliverySync();
320 :
321 296 : mNumReportsInFlight = 0;
322 296 : mCurReadHandlerIdx = 0;
323 296 : mGlobalDirtySet.ReleaseAll();
324 296 : }
325 :
326 4221 : bool Engine::IsClusterDataVersionMatch(const SingleLinkedListNode<DataVersionFilter> * aDataVersionFilterList,
327 : const ConcreteReadAttributePath & aPath)
328 : {
329 4221 : bool existPathMatch = false;
330 4221 : bool existVersionMismatch = false;
331 26085 : for (auto filter = aDataVersionFilterList; filter != nullptr; filter = filter->mpNext)
332 : {
333 21864 : if (aPath.mEndpointId == filter->mValue.mEndpointId && aPath.mClusterId == filter->mValue.mClusterId)
334 : {
335 109 : existPathMatch = true;
336 :
337 109 : if (!IsClusterDataVersionEqualTo(mpImEngine->GetDataModelProvider(),
338 218 : ConcreteClusterPath(filter->mValue.mEndpointId, filter->mValue.mClusterId),
339 109 : filter->mValue.mDataVersion.Value()))
340 : {
341 79 : existVersionMismatch = true;
342 : }
343 : }
344 : }
345 4221 : return existPathMatch && !existVersionMismatch;
346 : }
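// Worked example (illustrative, not part of the original file; values are hypothetical): for a read
// path on endpoint 1 / cluster 6 whose current data version is 10, a filter list containing
// (endpoint 1, cluster 6, version 10) sets existPathMatch and the report is suppressed; if the list
// also contained (endpoint 1, cluster 6, version 7), that stale filter sets existVersionMismatch and
// the data is reported after all. Filters for other endpoint/cluster pairs are simply ignored here.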
347 :
348 2495 : static bool IsOutOfWriterSpaceError(CHIP_ERROR err)
349 : {
350 2495 : return err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL;
351 : }
352 :
353 1980 : CHIP_ERROR Engine::BuildSingleReportDataAttributeReportIBs(ReportDataMessage::Builder & aReportDataBuilder,
354 : ReadHandler * apReadHandler, bool * apHasMoreChunks,
355 : bool * apHasEncodedData)
356 : {
357 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
358 1980 : bool attributeDataWritten = false;
359 1980 : bool hasMoreChunks = true;
360 1980 : TLV::TLVWriter backup;
361 1980 : const uint32_t kReservedSizeEndOfReportIBs = 1;
362 1980 : bool reservedEndOfReportIBs = false;
363 :
364 1980 : aReportDataBuilder.Checkpoint(backup);
365 :
366 1980 : AttributeReportIBs::Builder & attributeReportIBs = aReportDataBuilder.CreateAttributeReportIBs();
367 1980 : size_t emptyReportDataLength = 0;
368 :
369 1980 : SuccessOrExit(err = aReportDataBuilder.GetError());
370 :
371 1980 : emptyReportDataLength = attributeReportIBs.GetWriter()->GetLengthWritten();
372 : //
373 : // Reserve enough space for closing out the Report IB list
374 : //
375 1980 : SuccessOrExit(err = attributeReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
376 1980 : reservedEndOfReportIBs = true;
377 :
378 : {
379 : // TODO: Figure out how AttributePathExpandIterator should handle read
380 : // vs write paths.
381 1980 : ConcreteAttributePath readPath;
382 :
383 1980 : ChipLogDetail(DataManagement,
384 : "Building Reports for ReadHandler with LastReportGeneration = 0x" ChipLogFormatX64
385 : " DirtyGeneration = 0x" ChipLogFormatX64,
386 : ChipLogValueX64(apReadHandler->mPreviousReportsBeginGeneration),
387 : ChipLogValueX64(apReadHandler->mDirtyGeneration));
388 :
389 : // This ReadHandler is not generating reports, so we reset the iterator for a clean start.
390 1980 : if (!apReadHandler->IsReporting())
391 : {
392 1169 : apReadHandler->ResetPathIterator();
393 : }
394 :
395 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
396 1980 : uint32_t attributesRead = 0;
397 : #endif
398 :
399 : // For each path included in the interested paths of the read handler...
400 1980 : for (RollbackAttributePathExpandIterator iterator(mpImEngine->GetDataModelProvider(),
401 1980 : apReadHandler->AttributeIterationPosition());
402 6463 : iterator.Next(readPath); iterator.MarkCompleted())
403 : {
404 4898 : if (!apReadHandler->IsPriming())
405 : {
406 677 : bool concretePathDirty = false;
407 : // TODO: Optimize this implementation by making the iterator only emit intersected paths.
408 677 : mGlobalDirtySet.ForEachActiveObject([&](auto * dirtyPath) {
409 815 : if (dirtyPath->IsAttributePathSupersetOf(readPath))
410 : {
411 : // We don't need to worry about paths that were already marked dirty before the last time this read handler
412 : // started a report that it completed: those paths already got reported.
413 252 : if (dirtyPath->mGeneration > apReadHandler->mPreviousReportsBeginGeneration)
414 : {
415 249 : concretePathDirty = true;
416 249 : return Loop::Break;
417 : }
418 : }
419 566 : return Loop::Continue;
420 : });
421 :
422 677 : if (!concretePathDirty)
423 : {
424 : // This attribute is not dirty, we just skip this one.
425 428 : continue;
426 : }
427 : }
428 : else
429 : {
430 4221 : if (IsClusterDataVersionMatch(apReadHandler->GetDataVersionFilterList(), readPath))
431 : {
432 26 : continue;
433 : }
434 : }
435 :
436 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
437 4444 : attributesRead++;
438 4444 : if (attributesRead > mMaxAttributesPerChunk)
439 : {
440 415 : ExitNow(err = CHIP_ERROR_BUFFER_TOO_SMALL);
441 : }
442 : #endif
443 :
444 : // If we are processing a read request, or the initial report of a subscription, just regard all paths as dirty
445 : // paths.
446 4424 : TLV::TLVWriter attributeBackup;
447 4424 : attributeReportIBs.Checkpoint(attributeBackup);
448 4424 : ConcreteReadAttributePath pathForRetrieval(readPath);
449 : // Load the saved state from previous encoding session for chunking of one single attribute (list chunking).
450 4424 : AttributeEncodeState encodeState = apReadHandler->GetAttributeEncodeState();
451 : DataModel::ActionReturnStatus status =
452 13272 : RetrieveClusterData(mpImEngine->GetDataModelProvider(), apReadHandler->GetSubjectDescriptor(),
453 4424 : apReadHandler->IsFabricFiltered(), attributeReportIBs, pathForRetrieval, &encodeState);
454 4424 : if (status.IsError())
455 : {
456 : // Operation error set, since this will affect the early return or the override on status encoding;
457 : // it will also be used for error reporting below.
458 395 : err = status.GetUnderlyingError();
459 :
460 : // If error is not an "out of writer space" error, rollback and encode status.
461 : // Otherwise, if partial data allowed, save the encode state.
462 : // Otherwise roll back. If we have already encoded some chunks, we are done; otherwise encode status.
463 :
464 395 : if (encodeState.AllowPartialData() && status.IsOutOfSpaceEncodingResponse())
465 : {
466 256 : ChipLogDetail(DataManagement,
467 : "List does not fit in packet, chunk between list items for clusterId: " ChipLogFormatMEI
468 : ", attributeId: " ChipLogFormatMEI,
469 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId));
470 : // Encoding was aborted but partial data is allowed, so we don't roll back; we save the state for the next chunk.
471 : // The expectation is that RetrieveClusterData has already reset attributeReportIBs to a good state (rolled
472 : // back any partially-written AttributeReportIB instances, reset its error status). Since AllowPartialData()
473 : // is true, we may not have encoded a complete attribute value, but we did, if we encoded anything, encode a
474 : // set of complete AttributeReportIB instances that represent part of the attribute value.
475 256 : apReadHandler->SetAttributeEncodeState(encodeState);
476 : }
477 : else
478 : {
479 : // We hit an error while writing reports; a common case is running out of buffer space. Roll back the
480 : // attributeReportIB to avoid any partial data.
481 139 : attributeReportIBs.Rollback(attributeBackup);
482 139 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
483 :
484 139 : if (!status.IsOutOfSpaceEncodingResponse())
485 : {
486 0 : ChipLogError(DataManagement,
487 : "Failed to retrieve data, roll back and encode status on clusterId: " ChipLogFormatMEI
488 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
489 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
490 : err.Format());
491 : // Try to encode our error as a status response.
492 0 : err = attributeReportIBs.EncodeAttributeStatus(pathForRetrieval, StatusIB(status.GetStatusCode()));
493 0 : if (err != CHIP_NO_ERROR)
494 : {
495 : // OK, just roll back again and give up; if we still ran out of space we
496 : // will send this status response in the next chunk.
497 0 : attributeReportIBs.Rollback(attributeBackup);
498 : }
499 : }
500 : else
501 : {
502 139 : ChipLogDetail(DataManagement,
503 : "Next attribute value does not fit in packet, roll back on clusterId: " ChipLogFormatMEI
504 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
505 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
506 : err.Format());
507 : }
508 : }
509 : }
510 4424 : SuccessOrExit(err);
511 : // Successfully encoded the attribute, clear the internal state.
512 4029 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
513 1980 : }
514 :
515 : // We just visited all paths this read handler is interested in and did not abort in the middle of iteration, so there
516 : // are no more chunks for this report.
517 1565 : hasMoreChunks = false;
518 : }
519 1980 : exit:
520 1980 : if (attributeReportIBs.GetWriter()->GetLengthWritten() != emptyReportDataLength)
521 : {
522 : // We may encounter BUFFER_TOO_SMALL with nothing actually written for the case of list chunking, so we check whether
523 : // we have actually written any attribute data before setting the flag.
524 1305 : attributeDataWritten = true;
525 : }
526 :
527 1980 : if (apHasEncodedData != nullptr)
528 : {
529 1980 : *apHasEncodedData = attributeDataWritten;
530 : }
531 : //
532 : // Running out of space is an error that we're expected to handle - the incompletely written DataIB has already been rolled back
533 : // earlier to ensure only whole and complete DataIBs are present in the stream.
534 : //
535 : // We can safely clear out the error so that the rest of the machinery to close out the reports, etc. will function correctly.
536 : // These are guaranteed to not fail since we've already reserved memory for the remaining 'close out' TLV operations in this
537 : // function and its callers.
538 : //
539 1980 : if (IsOutOfWriterSpaceError(err) && reservedEndOfReportIBs)
540 : {
541 415 : ChipLogDetail(DataManagement, "<RE:Run> We cannot put more chunks into this report. Enable chunking.");
542 415 : err = CHIP_NO_ERROR;
543 : }
544 :
545 : //
546 : // Only close out the report if we haven't hit an error yet so far.
547 : //
548 1980 : if (err == CHIP_NO_ERROR)
549 : {
550 1980 : attributeReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs);
551 :
552 1980 : err = attributeReportIBs.EndOfAttributeReportIBs();
553 :
554 : //
555 : // We reserved space for this earlier - consequently, the call to end the ReportIBs should
556 : // never fail, so assert if it does since that's a logic bug.
557 : //
558 1980 : VerifyOrDie(err == CHIP_NO_ERROR);
559 : }
560 :
561 : //
562 : // Roll back the entire ReportIB array if we never wrote any attributes
563 : // AND never hit an error.
564 : //
565 1980 : if (!attributeDataWritten && err == CHIP_NO_ERROR)
566 : {
567 675 : aReportDataBuilder.Rollback(backup);
568 : }
569 :
570 : // hasMoreChunks + no data encoded is a flag that we have encountered some trouble when processing the attribute.
571 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
572 : // set.
573 1980 : if (apHasMoreChunks != nullptr)
574 : {
575 1980 : *apHasMoreChunks = hasMoreChunks;
576 : }
577 :
578 1980 : return err;
579 : }
580 :
581 863 : CHIP_ERROR Engine::CheckAccessDeniedEventPaths(TLV::TLVWriter & aWriter, bool & aHasEncodedData, ReadHandler * apReadHandler)
582 : {
583 : using Protocols::InteractionModel::Status;
584 :
585 863 : CHIP_ERROR err = CHIP_NO_ERROR;
586 1756 : for (auto current = apReadHandler->mpEventPathList; current != nullptr;)
587 : {
588 893 : if (current->mValue.IsWildcardPath())
589 : {
590 837 : current = current->mpNext;
591 837 : continue;
592 : }
593 :
594 56 : ConcreteEventPath path(current->mValue.mEndpointId, current->mValue.mClusterId, current->mValue.mEventId);
595 :
596 56 : StatusIB statusIB;
597 :
598 56 : ReturnErrorOnFailure(
599 : CheckEventValidity(path, apReadHandler->GetSubjectDescriptor(), mpImEngine->GetDataModelProvider(), statusIB));
600 :
601 56 : if (statusIB.IsFailure())
602 : {
603 2 : TLV::TLVWriter checkpoint = aWriter;
604 2 : err = EventReportIB::ConstructEventStatusIB(aWriter, path, statusIB);
605 2 : if (err != CHIP_NO_ERROR)
606 : {
607 0 : aWriter = checkpoint;
608 0 : break;
609 : }
610 2 : aHasEncodedData = true;
611 : }
612 :
613 56 : current = current->mpNext;
614 : }
615 :
616 863 : return err;
617 : }
618 :
619 1980 : CHIP_ERROR Engine::BuildSingleReportDataEventReports(ReportDataMessage::Builder & aReportDataBuilder, ReadHandler * apReadHandler,
620 : bool aBufferIsUsed, bool * apHasMoreChunks, bool * apHasEncodedData)
621 : {
622 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
623 1980 : size_t eventCount = 0;
624 1980 : bool hasEncodedStatus = false;
625 1980 : TLV::TLVWriter backup;
626 1980 : bool eventClean = true;
627 1980 : auto & eventMin = apReadHandler->GetEventMin();
628 1980 : bool hasMoreChunks = false;
629 :
630 1980 : aReportDataBuilder.Checkpoint(backup);
631 :
632 1980 : VerifyOrExit(apReadHandler->GetEventPathList() != nullptr, );
633 :
634 : // If mpEventManagement is not valid or has not been initialized,
635 : // skip the rest of the processing.
636 890 : VerifyOrExit(mpEventManagement != nullptr && mpEventManagement->IsValid(),
637 : ChipLogError(DataManagement, "EventManagement has not yet initialized"));
638 :
639 887 : eventClean = apReadHandler->CheckEventClean(*mpEventManagement);
640 :
641 : // proceed only if there are new events.
642 887 : if (eventClean)
643 : {
644 24 : ExitNow(); // Read clean, move along
645 : }
646 :
647 : {
648 : // Just like what we do in BuildSingleReportDataAttributeReportIBs(), we need to reserve one byte for end of container tag
649 : // when encoding events to ensure we can close the container successfully.
650 863 : const uint32_t kReservedSizeEndOfReportIBs = 1;
651 863 : EventReportIBs::Builder & eventReportIBs = aReportDataBuilder.CreateEventReports();
652 863 : SuccessOrExit(err = aReportDataBuilder.GetError());
653 863 : VerifyOrExit(eventReportIBs.GetWriter() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
654 863 : SuccessOrExit(err = eventReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
655 :
656 863 : err = CheckAccessDeniedEventPaths(*(eventReportIBs.GetWriter()), hasEncodedStatus, apReadHandler);
657 863 : SuccessOrExit(err);
658 :
659 863 : err = mpEventManagement->FetchEventsSince(*(eventReportIBs.GetWriter()), apReadHandler->GetEventPathList(), eventMin,
660 863 : eventCount, apReadHandler->GetSubjectDescriptor());
661 :
662 863 : if ((err == CHIP_END_OF_TLV) || (err == CHIP_ERROR_TLV_UNDERRUN) || (err == CHIP_NO_ERROR))
663 : {
664 348 : err = CHIP_NO_ERROR;
665 348 : hasMoreChunks = false;
666 : }
667 515 : else if (IsOutOfWriterSpaceError(err))
668 : {
669 : // When the first cluster event is too big to fit in the packet, ignore that cluster event.
670 : // However, we may have encoded some attributes before; we don't skip it in that case.
671 515 : if (eventCount == 0)
672 : {
673 206 : if (!aBufferIsUsed)
674 : {
675 0 : eventMin++;
676 : }
677 206 : ChipLogDetail(DataManagement, "<RE:Run> first cluster event is too big so that it fails to fit in the packet!");
678 206 : err = CHIP_NO_ERROR;
679 : }
680 : else
681 : {
682 : // `FetchEventsSince` has filled the available space
683 : // within the allowed buffer before it fit all the
684 : // available events. This is an expected condition,
685 : // so we do not propagate the error to higher levels;
686 : // instead, we terminate the event processing for now
687 309 : err = CHIP_NO_ERROR;
688 : }
689 515 : hasMoreChunks = true;
690 : }
691 : else
692 : {
693 : // All other errors are propagated to the higher level.
694 : // Exiting here and returning an error will lead to
695 : // abandoning the subscription.
696 0 : ExitNow();
697 : }
698 :
699 863 : SuccessOrExit(err = eventReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs));
700 863 : SuccessOrExit(err = eventReportIBs.EndOfEventReports());
701 : }
702 863 : ChipLogDetail(DataManagement, "Fetched %u events", static_cast<unsigned int>(eventCount));
703 :
704 0 : exit:
705 1980 : if (apHasEncodedData != nullptr)
706 : {
707 1980 : *apHasEncodedData = hasEncodedStatus || (eventCount != 0);
708 : }
709 :
710 : // Maybe encoding the attributes has already used up all space.
711 1980 : if ((err == CHIP_NO_ERROR || IsOutOfWriterSpaceError(err)) && !(hasEncodedStatus || (eventCount != 0)))
712 : {
713 1340 : aReportDataBuilder.Rollback(backup);
714 1340 : err = CHIP_NO_ERROR;
715 : }
716 :
717 : // hasMoreChunks + no data encoded is a flag that we have encountered some trouble when processing the attribute.
718 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attribute and no events but hasMoreChunks is
719 : // set.
720 1980 : if (apHasMoreChunks != nullptr)
721 : {
722 1980 : *apHasMoreChunks = hasMoreChunks;
723 : }
724 1980 : return err;
725 : }
726 :
727 1980 : CHIP_ERROR Engine::BuildAndSendSingleReportData(ReadHandler * apReadHandler)
728 : {
729 1980 : CHIP_ERROR err = CHIP_NO_ERROR;
730 1980 : System::PacketBufferTLVWriter reportDataWriter;
731 1980 : ReportDataMessage::Builder reportDataBuilder;
732 1980 : System::PacketBufferHandle bufHandle = nullptr;
733 1980 : uint16_t reservedSize = 0;
734 1980 : bool hasMoreChunks = false;
735 1980 : bool needCloseReadHandler = false;
736 1980 : size_t reportBufferMaxSize = 0;
737 :
738 : // Reserved size for the MoreChunks boolean flag, which takes up 1 byte for the control tag and 1 byte for the context tag.
739 1980 : const uint32_t kReservedSizeForMoreChunksFlag = 1 + 1;
740 :
741 : // Reserved size for the uint8_t InteractionModelRevision flag, which takes up 1 byte for the control tag and 1 byte for the
742 : // context tag, 1 byte for value
743 1980 : const uint32_t kReservedSizeForIMRevision = 1 + 1 + 1;
744 :
745 : // Reserved size for the end of report message, which is an end-of-container (i.e 1 byte for the control tag).
746 1980 : const uint32_t kReservedSizeForEndOfReportMessage = 1;
747 :
748 : // Reserved size for an empty EventReportIBs, so we can at least check if there are any events that need to be reported.
749 1980 : const uint32_t kReservedSizeForEventReportIBs = 3; // type, tag, end of container
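// Note (illustrative, not part of the original file): with the constants above, the writer reserves
// 2 (MoreChunks) + 3 (InteractionModelRevision) + 1 (end of ReportDataMessage) + 3 (empty
// EventReportIBs) = 9 bytes up front, in addition to the MIC reservation below. The EventReportIBs
// portion is unreserved again before events are encoded, and the remainder before the message is
// closed out.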
750 :
751 1980 : VerifyOrExit(apReadHandler != nullptr, err = CHIP_ERROR_INVALID_ARGUMENT);
752 1980 : VerifyOrExit(apReadHandler->GetSession() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
753 :
754 1980 : reportBufferMaxSize = apReadHandler->GetReportBufferMaxSize();
755 :
756 1980 : bufHandle = System::PacketBufferHandle::New(reportBufferMaxSize);
757 1980 : VerifyOrExit(!bufHandle.IsNull(), err = CHIP_ERROR_NO_MEMORY);
758 :
759 1980 : if (bufHandle->AvailableDataLength() > reportBufferMaxSize)
760 : {
761 0 : reservedSize = static_cast<uint16_t>(bufHandle->AvailableDataLength() - reportBufferMaxSize);
762 : }
763 :
764 1980 : reportDataWriter.Init(std::move(bufHandle));
765 :
766 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
767 1980 : reportDataWriter.ReserveBuffer(mReservedSize);
768 : #endif
769 :
770 : // Always limit the size of the generated packet to fit within the max size returned by the ReadHandler regardless
771 : // of the available buffer capacity.
772 : // Also, we need to reserve some extra space for the MIC field.
773 1980 : reportDataWriter.ReserveBuffer(static_cast<uint32_t>(reservedSize + Crypto::CHIP_CRYPTO_AEAD_MIC_LENGTH_BYTES));
774 :
775 : // Create a report data.
776 1980 : err = reportDataBuilder.Init(&reportDataWriter);
777 1980 : SuccessOrExit(err);
778 :
779 1980 : if (apReadHandler->IsType(ReadHandler::InteractionType::Subscribe))
780 : {
781 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
782 : // Notify the ICDManager that we are about to send a subscription report before we prepare the Report payload.
783 : // This allows the ICDManager to trigger any necessary updates and have the information in the report about to be sent.
784 : app::ICDNotifier::GetInstance().NotifySubscriptionReport();
785 : #endif // CHIP_CONFIG_ENABLE_ICD_SERVER
786 :
787 438 : SubscriptionId subscriptionId = 0;
788 438 : apReadHandler->GetSubscriptionId(subscriptionId);
789 438 : reportDataBuilder.SubscriptionId(subscriptionId);
790 : }
791 :
792 1980 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
793 : kReservedSizeForEndOfReportMessage + kReservedSizeForEventReportIBs));
794 :
795 : {
796 1980 : bool hasMoreChunksForAttributes = false;
797 1980 : bool hasMoreChunksForEvents = false;
798 1980 : bool hasEncodedAttributes = false;
799 1980 : bool hasEncodedEvents = false;
800 :
801 1980 : err = BuildSingleReportDataAttributeReportIBs(reportDataBuilder, apReadHandler, &hasMoreChunksForAttributes,
802 : &hasEncodedAttributes);
803 2011 : SuccessOrExit(err);
804 1980 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForEventReportIBs));
805 1980 : err = BuildSingleReportDataEventReports(reportDataBuilder, apReadHandler, hasEncodedAttributes, &hasMoreChunksForEvents,
806 : &hasEncodedEvents);
807 1980 : SuccessOrExit(err);
808 :
809 1980 : hasMoreChunks = hasMoreChunksForAttributes || hasMoreChunksForEvents;
810 :
811 1980 : if (!hasEncodedAttributes && !hasEncodedEvents && hasMoreChunks)
812 : {
813 31 : ChipLogError(DataManagement,
814 : "No data actually encoded but hasMoreChunks flag is set, close read handler! (attribute too big?)");
815 31 : err = apReadHandler->SendStatusReport(Protocols::InteractionModel::Status::ResourceExhausted);
816 31 : if (err == CHIP_NO_ERROR)
817 : {
818 31 : needCloseReadHandler = true;
819 : }
820 31 : ExitNow();
821 : }
822 : }
823 :
824 1949 : SuccessOrExit(err = reportDataBuilder.GetError());
825 1949 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
826 : kReservedSizeForEndOfReportMessage));
827 1949 : if (hasMoreChunks)
828 : {
829 865 : reportDataBuilder.MoreChunkedMessages(true);
830 : }
831 1084 : else if (apReadHandler->IsType(ReadHandler::InteractionType::Read))
832 : {
833 704 : reportDataBuilder.SuppressResponse(true);
834 : }
835 :
836 1949 : reportDataBuilder.EndOfReportDataMessage();
837 :
838 : //
839 : // Since we've already reserved space for both the MoreChunked/SuppressResponse flags, as well as
840 : // the end-of-container flag for the end of the report, we should never hit an error closing out the message.
841 : //
842 1949 : VerifyOrDie(reportDataBuilder.GetError() == CHIP_NO_ERROR);
843 :
844 1949 : err = reportDataWriter.Finalize(&bufHandle);
845 1949 : SuccessOrExit(err);
846 :
847 1949 : ChipLogDetail(DataManagement, "<RE> Sending report (payload has %" PRIu32 " bytes)...", reportDataWriter.GetLengthWritten());
848 1949 : err = SendReport(apReadHandler, std::move(bufHandle), hasMoreChunks);
849 1949 : VerifyOrExit(err == CHIP_NO_ERROR,
850 : ChipLogError(DataManagement, "<RE> Error sending out report data with %" CHIP_ERROR_FORMAT "!", err.Format()));
851 :
852 1945 : ChipLogDetail(DataManagement, "<RE> ReportsInFlight = %" PRIu32 " with readHandler %" PRIu32 ", RE has %s", mNumReportsInFlight,
853 : mCurReadHandlerIdx, hasMoreChunks ? "more messages" : "no more messages");
854 :
855 0 : exit:
856 1980 : if (err != CHIP_NO_ERROR || (apReadHandler->IsType(ReadHandler::InteractionType::Read) && !hasMoreChunks) ||
857 : needCloseReadHandler)
858 : {
859 : //
860 : // In the case of successful report generation where we're on the last chunk of a read, we don't expect
861 : // any further activity on this exchange. The EC layer will automatically close our EC, so shut down the ReadHandler
862 : // gracefully.
863 : //
864 737 : apReadHandler->Close();
865 : }
866 :
867 1980 : return err;
868 1980 : }
869 :
870 1758 : void Engine::Run(System::Layer * aSystemLayer, void * apAppState)
871 : {
872 1758 : Engine * const pEngine = reinterpret_cast<Engine *>(apAppState);
873 1758 : pEngine->mRunScheduled = false;
874 1758 : pEngine->Run();
875 1758 : }
876 :
877 2154 : CHIP_ERROR Engine::ScheduleRun()
878 : {
879 2154 : if (IsRunScheduled())
880 : {
881 396 : return CHIP_NO_ERROR;
882 : }
883 :
884 1758 : Messaging::ExchangeManager * exchangeManager = mpImEngine->GetExchangeManager();
885 1758 : if (exchangeManager == nullptr)
886 : {
887 0 : return CHIP_ERROR_INCORRECT_STATE;
888 : }
889 1758 : SessionManager * sessionManager = exchangeManager->GetSessionManager();
890 1758 : if (sessionManager == nullptr)
891 : {
892 0 : return CHIP_ERROR_INCORRECT_STATE;
893 : }
894 1758 : System::Layer * systemLayer = sessionManager->SystemLayer();
895 1758 : if (systemLayer == nullptr)
896 : {
897 0 : return CHIP_ERROR_INCORRECT_STATE;
898 : }
899 1758 : ReturnErrorOnFailure(systemLayer->ScheduleWork(Run, this));
900 1758 : mRunScheduled = true;
901 1758 : return CHIP_NO_ERROR;
902 : }
903 :
904 2054 : void Engine::Run()
905 : {
906 2054 : uint32_t numReadHandled = 0;
907 :
908 : // We may be deallocating read handlers as we go. Track how many we had
909 : // initially, so we make sure to go through all of them.
910 2054 : size_t initialAllocated = mpImEngine->mReadHandlers.Allocated();
911 4271 : while ((mNumReportsInFlight < CHIP_IM_MAX_REPORTS_IN_FLIGHT) && (numReadHandled < initialAllocated))
912 : {
913 : ReadHandler * readHandler =
914 2221 : mpImEngine->ActiveHandlerAt(mCurReadHandlerIdx % (uint32_t) mpImEngine->mReadHandlers.Allocated());
915 2221 : VerifyOrDie(readHandler != nullptr);
916 :
917 2221 : if (readHandler->ShouldReportUnscheduled() || mpImEngine->GetReportScheduler()->IsReportableNow(readHandler))
918 : {
919 :
920 1979 : mRunningReadHandler = readHandler;
921 1979 : CHIP_ERROR err = BuildAndSendSingleReportData(readHandler);
922 1979 : mRunningReadHandler = nullptr;
923 1979 : if (err != CHIP_NO_ERROR)
924 : {
925 4 : return;
926 : }
927 : }
928 :
929 2217 : numReadHandled++;
930 : // If readHandler removed itself from our list, we also decremented
931 : // mCurReadHandlerIdx to account for that removal, so it's safe to
932 : // increment here.
933 2217 : mCurReadHandlerIdx++;
934 : }
935 :
936 : //
937 : // If our tracker has exceeded the bounds of the handler list, reset it back to 0.
938 : // This isn't strictly necessary, but does make it easier to debug issues in this code if they
939 : // do arise.
940 : //
941 2050 : if (mCurReadHandlerIdx >= mpImEngine->mReadHandlers.Allocated())
942 : {
943 1993 : mCurReadHandlerIdx = 0;
944 : }
945 :
946 2050 : bool allReadClean = true;
947 :
948 2050 : mpImEngine->mReadHandlers.ForEachActiveObject([&allReadClean](ReadHandler * handler) {
949 2873 : if (handler->IsDirty())
950 : {
951 867 : allReadClean = false;
952 867 : return Loop::Break;
953 : }
954 :
955 2006 : return Loop::Continue;
956 : });
957 :
958 2050 : if (allReadClean)
959 : {
960 1183 : ChipLogDetail(DataManagement, "All ReadHandler-s are clean, clear GlobalDirtySet");
961 :
962 1183 : mGlobalDirtySet.ReleaseAll();
963 : }
964 : }
965 :
966 276 : bool Engine::MergeOverlappedAttributePath(const AttributePathParams & aAttributePath)
967 : {
968 276 : return Loop::Break == mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
969 214 : if (path->IsAttributePathSupersetOf(aAttributePath))
970 : {
971 112 : path->mGeneration = GetDirtySetGeneration();
972 112 : return Loop::Break;
973 : }
974 102 : if (aAttributePath.IsAttributePathSupersetOf(*path))
975 : {
976 : // TODO: the wildcard input path may be a superset of later paths in globalDirtySet; this is fine at the moment, since
977 : // when building a report, the first path of globalDirtySet is used to compare against the interested paths that read
978 : // clients want.
979 : // It would be better to eliminate the duplicate wildcard paths in a follow-up.
980 2 : path->mGeneration = GetDirtySetGeneration();
981 2 : path->mEndpointId = aAttributePath.mEndpointId;
982 2 : path->mClusterId = aAttributePath.mClusterId;
983 2 : path->mListIndex = aAttributePath.mListIndex;
984 2 : path->mAttributeId = aAttributePath.mAttributeId;
985 2 : return Loop::Break;
986 : }
987 100 : return Loop::Continue;
988 276 : });
989 : }
990 :
991 8 : bool Engine::ClearTombPaths()
992 : {
993 8 : bool pathReleased = false;
994 8 : mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
995 64 : if (path->mGeneration == 0)
996 : {
997 28 : mGlobalDirtySet.ReleaseObject(path);
998 28 : pathReleased = true;
999 : }
1000 64 : return Loop::Continue;
1001 : });
1002 8 : return pathReleased;
1003 : }
1004 :
1005 5 : bool Engine::MergeDirtyPathsUnderSameCluster()
1006 : {
1007 5 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1008 40 : if (outerPath->HasWildcardClusterId() || outerPath->mGeneration == 0)
1009 : {
1010 14 : return Loop::Continue;
1011 : }
1012 26 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1013 208 : if (innerPath == outerPath)
1014 : {
1015 26 : return Loop::Continue;
1016 : }
1017 : // We don't support paths with a wildcard endpoint + a concrete cluster in global dirty set, so we do a simple == check
1018 : // here.
1019 182 : if (innerPath->mEndpointId != outerPath->mEndpointId || innerPath->mClusterId != outerPath->mClusterId)
1020 : {
1021 168 : return Loop::Continue;
1022 : }
1023 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1024 : {
1025 0 : outerPath->mGeneration = innerPath->mGeneration;
1026 : }
1027 14 : outerPath->SetWildcardAttributeId();
1028 :
1029 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1030 : // generation to 0 and then clear it later.
1031 14 : innerPath->mGeneration = 0;
1032 14 : return Loop::Continue;
1033 : });
1034 26 : return Loop::Continue;
1035 : });
1036 :
1037 5 : return ClearTombPaths();
1038 : }
1039 :
1040 3 : bool Engine::MergeDirtyPathsUnderSameEndpoint()
1041 : {
1042 3 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1043 24 : if (outerPath->HasWildcardEndpointId() || outerPath->mGeneration == 0)
1044 : {
1045 14 : return Loop::Continue;
1046 : }
1047 10 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1048 80 : if (innerPath == outerPath)
1049 : {
1050 10 : return Loop::Continue;
1051 : }
1052 70 : if (innerPath->mEndpointId != outerPath->mEndpointId)
1053 : {
1054 56 : return Loop::Continue;
1055 : }
1056 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1057 : {
1058 0 : outerPath->mGeneration = innerPath->mGeneration;
1059 : }
1060 14 : outerPath->SetWildcardClusterId();
1061 14 : outerPath->SetWildcardAttributeId();
1062 :
1063 : // The object pool does not allow us to release objects in a nested iteration, mark the path as a tomb by setting its
1064 : // generation to 0 and then clear it later.
1065 14 : innerPath->mGeneration = 0;
1066 14 : return Loop::Continue;
1067 : });
1068 10 : return Loop::Continue;
1069 : });
1070 3 : return ClearTombPaths();
1071 : }
1072 :
1073 189 : CHIP_ERROR Engine::InsertPathIntoDirtySet(const AttributePathParams & aAttributePath)
1074 : {
1075 189 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1076 :
1077 82 : if (mGlobalDirtySet.Exhausted() && !MergeDirtyPathsUnderSameCluster() && !MergeDirtyPathsUnderSameEndpoint())
1078 : {
1079 1 : ChipLogDetail(DataManagement, "Global dirty set pool exhausted, merge all paths.");
1080 1 : mGlobalDirtySet.ReleaseAll();
1081 1 : auto object = mGlobalDirtySet.CreateObject();
1082 1 : object->mGeneration = GetDirtySetGeneration();
1083 : }
1084 :
1085 82 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1086 79 : ChipLogDetail(DataManagement, "Cannot merge the new path into any existing path, create one.");
1087 :
1088 79 : auto object = mGlobalDirtySet.CreateObject();
1089 79 : if (object == nullptr)
1090 : {
1091 : // This should not happen; this path should have been merged into the wildcard endpoint entry at least.
1092 0 : ChipLogError(DataManagement, "mGlobalDirtySet pool full, cannot handle more entries!");
1093 0 : return CHIP_ERROR_NO_MEMORY;
1094 : }
1095 79 : *object = aAttributePath;
1096 79 : object->mGeneration = GetDirtySetGeneration();
1097 :
1098 79 : return CHIP_NO_ERROR;
1099 : }
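// Illustrative summary (not part of the original file) of the escalation performed above when a new
// dirty path cannot be merged into an existing entry:
//
//   1. MergeOverlappedAttributePath(): absorb the path into an existing superset/subset entry.
//   2. If the pool is exhausted, MergeDirtyPathsUnderSameCluster(): collapse entries sharing the same
//      endpoint and cluster into a wildcard-attribute entry.
//   3. Then MergeDirtyPathsUnderSameEndpoint(): collapse entries sharing the same endpoint into a
//      wildcard-cluster/attribute entry.
//   4. As a last resort, release everything and keep a single fully wildcarded entry, which marks
//      all attributes dirty.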
1100 :
1101 5412 : CHIP_ERROR Engine::SetDirty(const AttributePathParams & aAttributePath)
1102 : {
1103 5412 : BumpDirtySetGeneration();
1104 :
1105 5412 : bool intersectsInterestPath = false;
1106 5412 : DataModel::Provider * dataModel = mpImEngine->GetDataModelProvider();
1107 5412 : mpImEngine->mReadHandlers.ForEachActiveObject([&dataModel, &aAttributePath, &intersectsInterestPath](ReadHandler * handler) {
1108 : // We call AttributePathIsDirty for both read interactions and subscribe interactions, since we may send inconsistent
1109 : // attribute data between two chunks. AttributePathIsDirty will not schedule a new run for read handlers which are
1110 : // waiting for a response to the last message chunk for read interactions.
1111 477 : if (handler->CanStartReporting() || handler->IsAwaitingReportResponse())
1112 : {
1113 934 : for (auto object = handler->GetAttributePathList(); object != nullptr; object = object->mpNext)
1114 : {
1115 802 : if (object->mValue.Intersects(aAttributePath))
1116 : {
1117 345 : handler->AttributePathIsDirty(dataModel, aAttributePath);
1118 345 : intersectsInterestPath = true;
1119 345 : break;
1120 : }
1121 : }
1122 : }
1123 :
1124 477 : return Loop::Continue;
1125 : });
1126 :
1127 5412 : if (!intersectsInterestPath)
1128 : {
1129 5228 : return CHIP_NO_ERROR;
1130 : }
1131 184 : ReturnErrorOnFailure(InsertPathIntoDirtySet(aAttributePath));
1132 :
1133 184 : return CHIP_NO_ERROR;
1134 : }
1135 :
1136 1949 : CHIP_ERROR Engine::SendReport(ReadHandler * apReadHandler, System::PacketBufferHandle && aPayload, bool aHasMoreChunks)
1137 : {
1138 1949 : CHIP_ERROR err = CHIP_NO_ERROR;
1139 :
1140 : // We can only have 1 report in flight for any given read - increment and break out.
1141 1949 : mNumReportsInFlight++;
1142 1949 : err = apReadHandler->SendReportData(std::move(aPayload), aHasMoreChunks);
1143 1949 : if (err != CHIP_NO_ERROR)
1144 : {
1145 4 : --mNumReportsInFlight;
1146 : }
1147 1949 : return err;
1148 : }
1149 :
1150 1945 : void Engine::OnReportConfirm()
1151 : {
1152 1945 : VerifyOrDie(mNumReportsInFlight > 0);
1153 :
1154 1945 : if (mNumReportsInFlight == CHIP_IM_MAX_REPORTS_IN_FLIGHT)
1155 : {
1156 : // We could have other things waiting to go now that this report is no
1157 : // longer in flight.
1158 61 : ScheduleRun();
1159 : }
1160 1945 : mNumReportsInFlight--;
1161 1945 : ChipLogDetail(DataManagement, "<RE> OnReportConfirm: NumReports = %" PRIu32, mNumReportsInFlight);
1162 1945 : }
1163 :
1164 20 : void Engine::GetMinEventLogPosition(uint32_t & aMinLogPosition)
1165 : {
1166 20 : mpImEngine->mReadHandlers.ForEachActiveObject([&aMinLogPosition](ReadHandler * handler) {
1167 20 : if (handler->IsType(ReadHandler::InteractionType::Read))
1168 : {
1169 0 : return Loop::Continue;
1170 : }
1171 :
1172 20 : uint32_t initialWrittenEventsBytes = handler->GetLastWrittenEventsBytes();
1173 20 : if (initialWrittenEventsBytes < aMinLogPosition)
1174 : {
1175 20 : aMinLogPosition = initialWrittenEventsBytes;
1176 : }
1177 :
1178 20 : return Loop::Continue;
1179 : });
1180 20 : }
1181 :
1182 20 : CHIP_ERROR Engine::ScheduleBufferPressureEventDelivery(uint32_t aBytesWritten)
1183 : {
1184 20 : uint32_t minEventLogPosition = aBytesWritten;
1185 20 : GetMinEventLogPosition(minEventLogPosition);
1186 20 : if (aBytesWritten - minEventLogPosition > CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD)
1187 : {
1188 0 : ChipLogDetail(DataManagement, "<RE> Buffer overfilled CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD %d, schedule engine run",
1189 : CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD);
1190 0 : return ScheduleRun();
1191 : }
1192 20 : return CHIP_NO_ERROR;
1193 : }
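// Worked example (illustrative, not part of the original file; byte values are hypothetical): if the
// event log has written aBytesWritten = 5000 bytes in total and the slowest subscriber last caught up
// at 4000 bytes, the backlog is 1000 bytes; a reporting run is scheduled only when that backlog
// exceeds CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD.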
1194 :
1195 662 : CHIP_ERROR Engine::NewEventGenerated(ConcreteEventPath & aPath, uint32_t aBytesConsumed)
1196 : {
1197 : // If we literally have no read handlers right now that care about any events,
1198 : // we don't need to call schedule run for event.
1199 : // If schedule run is called, actually we would not delivery events as well.
1200 : // Just wanna save one schedule run here
1201 662 : if (mpImEngine->mEventPathPool.Allocated() == 0)
1202 : {
1203 630 : return CHIP_NO_ERROR;
1204 : }
1205 :
1206 32 : bool isUrgentEvent = false;
1207 32 : mpImEngine->mReadHandlers.ForEachActiveObject([&aPath, &isUrgentEvent](ReadHandler * handler) {
1208 40 : if (handler->IsType(ReadHandler::InteractionType::Read))
1209 : {
1210 0 : return Loop::Continue;
1211 : }
1212 :
1213 104 : for (auto * interestedPath = handler->GetEventPathList(); interestedPath != nullptr;
1214 64 : interestedPath = interestedPath->mpNext)
1215 : {
1216 76 : if (interestedPath->mValue.IsEventPathSupersetOf(aPath) && interestedPath->mValue.mIsUrgentEvent)
1217 : {
1218 12 : isUrgentEvent = true;
1219 12 : handler->ForceDirtyState();
1220 12 : break;
1221 : }
1222 : }
1223 :
1224 40 : return Loop::Continue;
1225 : });
1226 :
1227 32 : if (isUrgentEvent)
1228 : {
1229 12 : ChipLogDetail(DataManagement, "Urgent event will be sent once reporting is not blocked by the min interval");
1230 12 : return CHIP_NO_ERROR;
1231 : }
1232 :
1233 20 : return ScheduleBufferPressureEventDelivery(aBytesConsumed);
1234 : }
1235 :
1236 296 : void Engine::ScheduleUrgentEventDeliverySync(Optional<FabricIndex> fabricIndex)
1237 : {
1238 296 : mpImEngine->mReadHandlers.ForEachActiveObject([fabricIndex](ReadHandler * handler) {
1239 0 : if (handler->IsType(ReadHandler::InteractionType::Read))
1240 : {
1241 0 : return Loop::Continue;
1242 : }
1243 :
1244 0 : if (fabricIndex.HasValue() && fabricIndex.Value() != handler->GetAccessingFabricIndex())
1245 : {
1246 0 : return Loop::Continue;
1247 : }
1248 :
1249 0 : handler->ForceDirtyState();
1250 :
1251 0 : return Loop::Continue;
1252 : });
1253 :
1254 296 : Run();
1255 296 : }
1256 :
1257 5146 : void Engine::MarkDirty(const AttributePathParams & path)
1258 : {
1259 5146 : CHIP_ERROR err = SetDirty(path);
1260 5146 : if (err != CHIP_NO_ERROR)
1261 : {
1262 0 : ChipLogError(DataManagement, "Failed to set path dirty: %" CHIP_ERROR_FORMAT, err.Format());
1263 : }
1264 5146 : }
1265 :
1266 : } // namespace reporting
1267 : } // namespace app
1268 : } // namespace chip
|