Skip to content

memoir.memento package

memoir.memento

Memento module for specialized memory collections.

This module contains memento classes that manage specific types of memories:

- Location: Spatial and geographical memories
- Timeline: Temporal and chronological memories
- Profile: Personal and identity-related memories

These classes represent collections of memories organized around specific themes, rather than traditional "managers" - they are memory repositories.

LocationMemento

Manages user location data and generates geographic event summaries.

Source code in src/memoir/memento/location.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class LocationMemento:
    """Manages user location data and generates geographic event summaries."""

    def __init__(self, memory_store):
        """Initialize location memento with memory store."""
        self.memory_store = memory_store

    async def apply_location_events(
        self,
        location_events: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply location events to the memory store.

        For same-location events, retrieves existing content and merges with new event.

        Args:
            location_events: List of location events with location and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store location events in (default: "default")
        """
        logger.debug(
            f"LocationManager.apply_location_events called with {len(location_events) if location_events else 0} events"
        )
        if not location_events:
            logger.debug("No location events provided to apply_location_events")
            return

        for event in location_events:
            location_name = event.get("location", "")
            description = event.get("description", "")

            if not location_name or not description:
                logger.warning(f"Invalid location event: {event}")
                continue

            # Normalize location name for consistent storage
            normalized_location = self._normalize_location_name(location_name)

            if not normalized_location:
                logger.debug(f"Invalid location name: {location_name}")
                continue

            # Create the location path
            location_path = f"location.{normalized_location}"

            try:
                await self._store_or_merge_location_event(
                    location_path, description, metadata, namespace
                )
                logger.debug(f"Applied location event: {location_path} - {description}")
            except Exception as e:
                logger.error(f"Failed to apply location event {location_path}: {e}")

    def _normalize_location_name(self, location_name: str) -> str:
        """
        Normalize location name for consistent storage.

        Args:
            location_name: Raw location name from LLM

        Returns:
            Normalized location name suitable for path storage
        """
        if not location_name or not isinstance(location_name, str):
            return ""

        # Clean and normalize the location name
        # Remove extra whitespace and convert to lowercase
        normalized = location_name.strip().lower()

        # Replace spaces and special characters with underscores
        normalized = re.sub(
            r"[^\w\s-]", "", normalized
        )  # Remove special chars except spaces and hyphens
        normalized = re.sub(
            r"[\s-]+", "_", normalized
        )  # Replace spaces/hyphens with underscores
        normalized = re.sub(r"_+", "_", normalized)  # Collapse multiple underscores
        normalized = normalized.strip("_")  # Remove leading/trailing underscores

        # Handle common location patterns and abbreviations
        location_mappings = {
            "new_york_city": "new_york_city",
            "nyc": "new_york_city",
            "ny": "new_york",
            "california": "california",
            "ca": "california",
            "san_francisco": "san_francisco",
            "sf": "san_francisco",
            "los_angeles": "los_angeles",
            "la": "los_angeles",
            "united_states": "united_states",
            "usa": "united_states",
            "us": "united_states",
        }

        # Apply mappings if available
        if normalized in location_mappings:
            normalized = location_mappings[normalized]

        # Ensure minimum length and validity
        if len(normalized) < 2:
            return ""

        return normalized

    async def _store_or_merge_location_event(
        self,
        location_path: str,
        description: str,
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Store location event or merge with existing location events.

        Args:
            location_path: Storage path for the location (e.g., "location.san_francisco")
            description: Event description
            metadata: Optional metadata
            namespace: Namespace to store location data in (default: "default")
        """
        # namespace parameter is passed to function

        # Check if location already has events
        existing_items = await self.memory_store.asearch(namespace, location_path)

        if existing_items:
            # Merge with existing location events
            _, existing_data = existing_items[0]

            if isinstance(existing_data, str):
                existing_content = existing_data
            elif isinstance(existing_data, dict):
                existing_content = existing_data.get("raw_text", "")
            else:
                existing_content = str(existing_data)

            # Merge descriptions, avoiding duplicates
            merged_content = self._merge_location_descriptions(
                existing_content, description
            )

            content = {
                "raw_text": merged_content,
                "summary": f"Location events at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": merged_content,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }
        else:
            # Create new location event
            content = {
                "raw_text": description,
                "summary": f"Location event at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": description,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }

        # Include metadata if provided
        if metadata:
            content["metadata"] = metadata

        # Store the location event
        logger.debug(
            f"About to call store_memory_async with namespace='{namespace}', path='{location_path}'"
        )
        logger.debug(f"Content to store: {content}")

        result = await self.memory_store.store_memory_async(
            namespace, content, location_path
        )
        logger.debug(f"store_memory_async returned: {result}")

        # Debug: immediately test if we can find what we just stored
        try:
            test_search = await self.memory_store.asearch(namespace, location_path)
            logger.debug(
                f"Immediate search for '{location_path}' found {len(test_search)} items"
            )
            if test_search:
                logger.debug(f"Found item: {test_search[0]}")

            # Also try searching with prefix
            prefix_search = await self.memory_store.asearch(namespace, "location.")
            logger.debug(
                f"Prefix search for 'location.' found {len(prefix_search)} items"
            )

        except Exception as e:
            logger.debug(f"Immediate search test failed: {e}")

    def _merge_location_descriptions(self, existing: str, new: str) -> str:
        """
        Merge location event descriptions, avoiding duplicates.

        Args:
            existing: Existing location event descriptions
            new: New location event description

        Returns:
            Merged location descriptions
        """
        if not existing:
            return new

        if not new:
            return existing

        # Split by common delimiters
        existing_events = [
            event.strip() for event in existing.split("|") if event.strip()
        ]

        # Check if new event is already present (fuzzy matching)
        new_lower = new.lower()
        for existing_event in existing_events:
            if existing_event.lower() == new_lower:
                return existing  # Duplicate, return existing

        # Add new event
        existing_events.append(new.strip())
        return " | ".join(existing_events)

    async def get_location_summary(
        self, llm: Any | None = None, namespace: str = "default"
    ) -> str:
        """
        Generate a summary of all location events.

        Args:
            llm: Optional LLM for generating narrative summaries
            namespace: Namespace to search for location data (default: "default")

        Returns:
            String summary of location events
        """
        try:
            # namespace parameter is passed to function

            # Search for all location events
            logger.debug(
                f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
            )
            all_items = await self.memory_store.asearch(namespace, "location.")
            logger.debug(f"Search returned {len(all_items)} items")

            # Debug: log what we found
            if all_items:
                logger.info(f"Found {len(all_items)} items with location. prefix")
                for item in all_items[:3]:  # Log first few items
                    logger.info(f"Location item: {item}")
            else:
                logger.debug("No items found with location. prefix")

                # Debug: search for ANY items with location data
                logger.debug("Searching for ANY items with location data...")
                all_items_debug = await self.memory_store.asearch(namespace, "")
                location_items_debug = []
                for path, data in all_items_debug:
                    if isinstance(data, dict) and (
                        data.get("memory_type") == "location_event"
                        or "location_name" in data.get("structured_data", {})
                    ):
                        location_items_debug.append((path, data))
                        logger.debug(f"Found location data under path: {path}")

                if location_items_debug:
                    logger.debug(
                        f"Found {len(location_items_debug)} location events but not under location.* paths!"
                    )
                    return self._generate_structured_location_summary(
                        location_items_debug
                    )
                else:
                    logger.debug("No location events found anywhere in memory store!")

            location_items = all_items  # All items should already have location. prefix

            if not location_items:
                return "No location events available."

            # If no LLM provided, generate structured summary
            if not llm:
                return self._generate_structured_location_summary(location_items)

            # Generate LLM-based narrative summary
            return await self._generate_llm_location_summary(location_items, llm)

        except Exception as e:
            logger.error(f"Failed to generate location summary: {e}")
            logger.error(f"Exception details: {type(e).__name__}: {e!s}")
            import traceback

            logger.error(f"Traceback: {traceback.format_exc()}")
            return "Error generating location summary."

    def _generate_structured_location_summary(self, location_items: list) -> str:
        """Generate a structured location summary without LLM."""
        summary_lines = ["=== USER LOCATION SUMMARY ===", ""]

        # Group and sort locations
        locations = {}
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            # Handle nested memory object structure from asearch results
            if isinstance(data, dict):
                # Check if this is a nested memory object with 'content' field
                if "content" in data and isinstance(data["content"], dict):
                    # Extract from nested structure: data['content']['raw_text']
                    content = data["content"].get("raw_text", str(data))
                else:
                    # Direct structure: data['raw_text']
                    content = data.get("raw_text", str(data))
            else:
                content = str(data)

            locations[location_name] = content

        # Sort locations alphabetically
        for location_name in sorted(locations.keys()):
            content = locations[location_name]
            summary_lines.append(f"{location_name}:")

            # Split multiple events and format nicely
            events = content.split(" | ")
            for event in events:
                if event.strip():
                    summary_lines.append(f"  - {event.strip()}")
            summary_lines.append("")

        return "\n".join(summary_lines)

    async def _generate_llm_location_summary(
        self, location_items: list, llm: Any
    ) -> str:
        """Generate an LLM-based narrative location summary."""
        # Prepare location data for LLM
        location_data = []
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            if isinstance(data, dict):
                content = data.get("raw_text", str(data))
            else:
                content = str(data)

            location_data.append(f"{location_name}: {content}")

        location_text = "\n".join(location_data)

        prompt = f"""Create a concise narrative summary of the user's location-related experiences and activities. Focus on places they've been, lived, worked, or had significant experiences.

Location Data:
{location_text}

Create a narrative summary that:
1. Groups related locations geographically when possible
2. Highlights significant places and experiences
3. Shows patterns in the user's movements or preferences
4. Keeps the summary concise but informative

Location Summary:"""

        try:
            response = await llm.ainvoke(prompt)
            return response.content.strip()
        except Exception as e:
            logger.error(f"LLM location summary failed: {e}")
            return self._generate_structured_location_summary(location_items)

    async def get_location_events_for_search(
        self, location_query: str, namespace: str = "default"
    ) -> list[dict]:
        """
        Get location events relevant to a search query.

        Args:
            location_query: Search query for locations
            namespace: Namespace to search for location data (default: "default")

        Returns:
            List of relevant location events
        """
        try:
            # namespace parameter is passed to function

            # Search for location events
            all_items = await self.memory_store.asearch(namespace, "location.")
            location_items = [
                (path, data) for path, data in all_items if path.startswith("location.")
            ]

            # Filter by relevance to query
            relevant_events = []
            query_lower = location_query.lower()

            for path, data in location_items:
                location_name = path.split(".", 1)[1].replace("_", " ")

                if isinstance(data, dict):
                    content = data.get("raw_text", str(data))
                else:
                    content = str(data)

                # Check if query matches location name or content
                if (
                    query_lower in location_name.lower()
                    or query_lower in content.lower()
                ):
                    relevant_events.append(
                        {
                            "location": location_name.title(),
                            "content": content,
                            "path": path,
                        }
                    )

            return relevant_events

        except Exception as e:
            logger.error(f"Failed to get location events for search: {e}")
            return []

__init__

__init__(memory_store)

Initialize location memento with memory store.

Source code in src/memoir/memento/location.py
def __init__(self, memory_store):
    """Initialize location memento with memory store."""
    # Backing store used by all read/write operations of this memento.
    self.memory_store = memory_store

apply_location_events async

apply_location_events(location_events: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply location events to the memory store.

For same-location events, retrieves existing content and merges with new event.

Parameters:

Name Type Description Default
location_events list[dict[str, str]]

List of location events with location and description

required
metadata dict | None

Optional metadata to include with events

None
namespace str

Namespace to store location events in (default: "default")

'default'
Source code in src/memoir/memento/location.py
async def apply_location_events(
    self,
    location_events: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply location events to the memory store.

    For same-location events, retrieves existing content and merges with new event.

    Args:
        location_events: List of location events with location and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store location events in (default: "default")
    """
    logger.debug(
        f"LocationManager.apply_location_events called with {len(location_events) if location_events else 0} events"
    )
    if not location_events:
        logger.debug("No location events provided to apply_location_events")
        return

    for event in location_events:
        location_name = event.get("location", "")
        description = event.get("description", "")

        # Both fields are required to form a usable event.
        if not location_name or not description:
            logger.warning(f"Invalid location event: {event}")
            continue

        # Normalize location name for consistent storage
        normalized_location = self._normalize_location_name(location_name)

        if not normalized_location:
            logger.debug(f"Invalid location name: {location_name}")
            continue

        # Create the location path
        location_path = f"location.{normalized_location}"

        try:
            await self._store_or_merge_location_event(
                location_path, description, metadata, namespace
            )
            logger.debug(f"Applied location event: {location_path} - {description}")
        except Exception as e:
            # One failed event does not stop processing of the rest.
            logger.error(f"Failed to apply location event {location_path}: {e}")

get_location_summary async

get_location_summary(llm: Any | None = None, namespace: str = 'default') -> str

Generate a summary of all location events.

Parameters:

Name Type Description Default
llm Any | None

Optional LLM for generating narrative summaries

None
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
str

String summary of location events

Source code in src/memoir/memento/location.py
async def get_location_summary(
    self, llm: Any | None = None, namespace: str = "default"
) -> str:
    """
    Generate a summary of all location events.

    Args:
        llm: Optional LLM for generating narrative summaries
        namespace: Namespace to search for location data (default: "default")

    Returns:
        String summary of location events
    """
    try:
        # namespace parameter is passed to function

        # Search for all location events
        logger.debug(
            f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
        )
        all_items = await self.memory_store.asearch(namespace, "location.")
        logger.debug(f"Search returned {len(all_items)} items")

        # Debug: log what we found
        if all_items:
            logger.info(f"Found {len(all_items)} items with location. prefix")
            for item in all_items[:3]:  # Log first few items
                logger.info(f"Location item: {item}")
        else:
            logger.debug("No items found with location. prefix")

            # Debug: search for ANY items with location data
            # (items typed as location events but stored under other paths)
            logger.debug("Searching for ANY items with location data...")
            all_items_debug = await self.memory_store.asearch(namespace, "")
            location_items_debug = []
            for path, data in all_items_debug:
                if isinstance(data, dict) and (
                    data.get("memory_type") == "location_event"
                    or "location_name" in data.get("structured_data", {})
                ):
                    location_items_debug.append((path, data))
                    logger.debug(f"Found location data under path: {path}")

            if location_items_debug:
                logger.debug(
                    f"Found {len(location_items_debug)} location events but not under location.* paths!"
                )
                # Summarize the misplaced items rather than reporting nothing.
                return self._generate_structured_location_summary(
                    location_items_debug
                )
            else:
                logger.debug("No location events found anywhere in memory store!")

        location_items = all_items  # All items should already have location. prefix

        if not location_items:
            return "No location events available."

        # If no LLM provided, generate structured summary
        if not llm:
            return self._generate_structured_location_summary(location_items)

        # Generate LLM-based narrative summary
        return await self._generate_llm_location_summary(location_items, llm)

    except Exception as e:
        # Never propagate: callers always receive a string.
        logger.error(f"Failed to generate location summary: {e}")
        logger.error(f"Exception details: {type(e).__name__}: {e!s}")
        import traceback

        logger.error(f"Traceback: {traceback.format_exc()}")
        return "Error generating location summary."
get_location_events_for_search async

get_location_events_for_search(location_query: str, namespace: str = 'default') -> list[dict]

Get location events relevant to a search query.

Parameters:

Name Type Description Default
location_query str

Search query for locations

required
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
list[dict]

List of relevant location events

Source code in src/memoir/memento/location.py
async def get_location_events_for_search(
    self, location_query: str, namespace: str = "default"
) -> list[dict]:
    """
    Get location events relevant to a search query.

    Args:
        location_query: Search query for locations
        namespace: Namespace to search for location data (default: "default")

    Returns:
        List of relevant location events
    """
    try:
        # namespace parameter is passed to function

        # Search for location events
        all_items = await self.memory_store.asearch(namespace, "location.")
        # Defensive filter: keep only genuine location.* paths.
        location_items = [
            (path, data) for path, data in all_items if path.startswith("location.")
        ]

        # Filter by relevance to query
        relevant_events = []
        query_lower = location_query.lower()

        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ")

            if isinstance(data, dict):
                content = data.get("raw_text", str(data))
            else:
                content = str(data)

            # Check if query matches location name or content
            if (
                query_lower in location_name.lower()
                or query_lower in content.lower()
            ):
                relevant_events.append(
                    {
                        "location": location_name.title(),
                        "content": content,
                        "path": path,
                    }
                )

        return relevant_events

    except Exception as e:
        # Never propagate: callers always receive a list.
        logger.error(f"Failed to get location events for search: {e}")
        return []

Location

Manages user location data and generates geographic event summaries.

Source code in src/memoir/memento/location.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class LocationMemento:
    """Manages user location data and generates geographic event summaries."""

    def __init__(self, memory_store):
        """Initialize location memento with memory store."""
        self.memory_store = memory_store

    async def apply_location_events(
        self,
        location_events: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply location events to the memory store.

        For same-location events, retrieves existing content and merges with new event.

        Args:
            location_events: List of location events with location and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store location events in (default: "default")
        """
        logger.debug(
            f"LocationManager.apply_location_events called with {len(location_events) if location_events else 0} events"
        )
        if not location_events:
            logger.debug("No location events provided to apply_location_events")
            return

        for event in location_events:
            location_name = event.get("location", "")
            description = event.get("description", "")

            if not location_name or not description:
                logger.warning(f"Invalid location event: {event}")
                continue

            # Normalize location name for consistent storage
            normalized_location = self._normalize_location_name(location_name)

            if not normalized_location:
                logger.debug(f"Invalid location name: {location_name}")
                continue

            # Create the location path
            location_path = f"location.{normalized_location}"

            try:
                await self._store_or_merge_location_event(
                    location_path, description, metadata, namespace
                )
                logger.debug(f"Applied location event: {location_path} - {description}")
            except Exception as e:
                logger.error(f"Failed to apply location event {location_path}: {e}")

    def _normalize_location_name(self, location_name: str) -> str:
        """
        Normalize location name for consistent storage.

        Args:
            location_name: Raw location name from LLM

        Returns:
            Normalized location name suitable for path storage
        """
        if not location_name or not isinstance(location_name, str):
            return ""

        # Clean and normalize the location name
        # Remove extra whitespace and convert to lowercase
        normalized = location_name.strip().lower()

        # Replace spaces and special characters with underscores
        normalized = re.sub(
            r"[^\w\s-]", "", normalized
        )  # Remove special chars except spaces and hyphens
        normalized = re.sub(
            r"[\s-]+", "_", normalized
        )  # Replace spaces/hyphens with underscores
        normalized = re.sub(r"_+", "_", normalized)  # Collapse multiple underscores
        normalized = normalized.strip("_")  # Remove leading/trailing underscores

        # Handle common location patterns and abbreviations
        location_mappings = {
            "new_york_city": "new_york_city",
            "nyc": "new_york_city",
            "ny": "new_york",
            "california": "california",
            "ca": "california",
            "san_francisco": "san_francisco",
            "sf": "san_francisco",
            "los_angeles": "los_angeles",
            "la": "los_angeles",
            "united_states": "united_states",
            "usa": "united_states",
            "us": "united_states",
        }

        # Apply mappings if available
        if normalized in location_mappings:
            normalized = location_mappings[normalized]

        # Ensure minimum length and validity
        if len(normalized) < 2:
            return ""

        return normalized

    async def _store_or_merge_location_event(
        self,
        location_path: str,
        description: str,
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Store location event or merge with existing location events.

        Args:
            location_path: Storage path for the location (e.g., "location.san_francisco")
            description: Event description
            metadata: Optional metadata
            namespace: Namespace to store location data in (default: "default")
        """
        # namespace parameter is passed to function

        # Check if location already has events
        existing_items = await self.memory_store.asearch(namespace, location_path)

        if existing_items:
            # Merge with existing location events
            _, existing_data = existing_items[0]

            if isinstance(existing_data, str):
                existing_content = existing_data
            elif isinstance(existing_data, dict):
                existing_content = existing_data.get("raw_text", "")
            else:
                existing_content = str(existing_data)

            # Merge descriptions, avoiding duplicates
            merged_content = self._merge_location_descriptions(
                existing_content, description
            )

            content = {
                "raw_text": merged_content,
                "summary": f"Location events at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": merged_content,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }
        else:
            # Create new location event
            content = {
                "raw_text": description,
                "summary": f"Location event at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": description,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }

        # Include metadata if provided
        if metadata:
            content["metadata"] = metadata

        # Store the location event
        logger.debug(
            f"About to call store_memory_async with namespace='{namespace}', path='{location_path}'"
        )
        logger.debug(f"Content to store: {content}")

        result = await self.memory_store.store_memory_async(
            namespace, content, location_path
        )
        logger.debug(f"store_memory_async returned: {result}")

        # Debug: immediately test if we can find what we just stored
        try:
            test_search = await self.memory_store.asearch(namespace, location_path)
            logger.debug(
                f"Immediate search for '{location_path}' found {len(test_search)} items"
            )
            if test_search:
                logger.debug(f"Found item: {test_search[0]}")

            # Also try searching with prefix
            prefix_search = await self.memory_store.asearch(namespace, "location.")
            logger.debug(
                f"Prefix search for 'location.' found {len(prefix_search)} items"
            )

        except Exception as e:
            logger.debug(f"Immediate search test failed: {e}")

    def _merge_location_descriptions(self, existing: str, new: str) -> str:
        """
        Merge location event descriptions, avoiding duplicates.

        Args:
            existing: Existing location event descriptions
            new: New location event description

        Returns:
            Merged location descriptions
        """
        if not existing:
            return new

        if not new:
            return existing

        # Split by common delimiters
        existing_events = [
            event.strip() for event in existing.split("|") if event.strip()
        ]

        # Check if new event is already present (fuzzy matching)
        new_lower = new.lower()
        for existing_event in existing_events:
            if existing_event.lower() == new_lower:
                return existing  # Duplicate, return existing

        # Add new event
        existing_events.append(new.strip())
        return " | ".join(existing_events)

    async def get_location_summary(
        self, llm: Any | None = None, namespace: str = "default"
    ) -> str:
        """
        Generate a summary of all location events.

        Args:
            llm: Optional LLM for generating narrative summaries
            namespace: Namespace to search for location data (default: "default")

        Returns:
            String summary of location events
        """
        try:
            # namespace parameter is passed to function

            # Search for all location events
            logger.debug(
                f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
            )
            all_items = await self.memory_store.asearch(namespace, "location.")
            logger.debug(f"Search returned {len(all_items)} items")

            # Debug: log what we found
            if all_items:
                logger.info(f"Found {len(all_items)} items with location. prefix")
                for item in all_items[:3]:  # Log first few items
                    logger.info(f"Location item: {item}")
            else:
                logger.debug("No items found with location. prefix")

                # Debug: search for ANY items with location data
                logger.debug("Searching for ANY items with location data...")
                all_items_debug = await self.memory_store.asearch(namespace, "")
                location_items_debug = []
                for path, data in all_items_debug:
                    if isinstance(data, dict) and (
                        data.get("memory_type") == "location_event"
                        or "location_name" in data.get("structured_data", {})
                    ):
                        location_items_debug.append((path, data))
                        logger.debug(f"Found location data under path: {path}")

                if location_items_debug:
                    logger.debug(
                        f"Found {len(location_items_debug)} location events but not under location.* paths!"
                    )
                    return self._generate_structured_location_summary(
                        location_items_debug
                    )
                else:
                    logger.debug("No location events found anywhere in memory store!")

            location_items = all_items  # All items should already have location. prefix

            if not location_items:
                return "No location events available."

            # If no LLM provided, generate structured summary
            if not llm:
                return self._generate_structured_location_summary(location_items)

            # Generate LLM-based narrative summary
            return await self._generate_llm_location_summary(location_items, llm)

        except Exception as e:
            logger.error(f"Failed to generate location summary: {e}")
            logger.error(f"Exception details: {type(e).__name__}: {e!s}")
            import traceback

            logger.error(f"Traceback: {traceback.format_exc()}")
            return "Error generating location summary."

    def _generate_structured_location_summary(self, location_items: list) -> str:
        """Generate a structured location summary without LLM."""
        summary_lines = ["=== USER LOCATION SUMMARY ===", ""]

        # Group and sort locations
        locations = {}
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            # Handle nested memory object structure from asearch results
            if isinstance(data, dict):
                # Check if this is a nested memory object with 'content' field
                if "content" in data and isinstance(data["content"], dict):
                    # Extract from nested structure: data['content']['raw_text']
                    content = data["content"].get("raw_text", str(data))
                else:
                    # Direct structure: data['raw_text']
                    content = data.get("raw_text", str(data))
            else:
                content = str(data)

            locations[location_name] = content

        # Sort locations alphabetically
        for location_name in sorted(locations.keys()):
            content = locations[location_name]
            summary_lines.append(f"{location_name}:")

            # Split multiple events and format nicely
            events = content.split(" | ")
            for event in events:
                if event.strip():
                    summary_lines.append(f"  - {event.strip()}")
            summary_lines.append("")

        return "\n".join(summary_lines)

    async def _generate_llm_location_summary(
        self, location_items: list, llm: Any
    ) -> str:
        """Generate an LLM-based narrative location summary."""
        # Prepare location data for LLM
        location_data = []
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            if isinstance(data, dict):
                content = data.get("raw_text", str(data))
            else:
                content = str(data)

            location_data.append(f"{location_name}: {content}")

        location_text = "\n".join(location_data)

        prompt = f"""Create a concise narrative summary of the user's location-related experiences and activities. Focus on places they've been, lived, worked, or had significant experiences.

Location Data:
{location_text}

Create a narrative summary that:
1. Groups related locations geographically when possible
2. Highlights significant places and experiences
3. Shows patterns in the user's movements or preferences
4. Keeps the summary concise but informative

Location Summary:"""

        try:
            response = await llm.ainvoke(prompt)
            return response.content.strip()
        except Exception as e:
            logger.error(f"LLM location summary failed: {e}")
            return self._generate_structured_location_summary(location_items)

    async def get_location_events_for_search(
        self, location_query: str, namespace: str = "default"
    ) -> list[dict]:
        """
        Get location events relevant to a search query.

        Args:
            location_query: Search query for locations
            namespace: Namespace to search for location data (default: "default")

        Returns:
            List of relevant location events
        """
        try:
            # namespace parameter is passed to function

            # Search for location events
            all_items = await self.memory_store.asearch(namespace, "location.")
            location_items = [
                (path, data) for path, data in all_items if path.startswith("location.")
            ]

            # Filter by relevance to query
            relevant_events = []
            query_lower = location_query.lower()

            for path, data in location_items:
                location_name = path.split(".", 1)[1].replace("_", " ")

                if isinstance(data, dict):
                    content = data.get("raw_text", str(data))
                else:
                    content = str(data)

                # Check if query matches location name or content
                if (
                    query_lower in location_name.lower()
                    or query_lower in content.lower()
                ):
                    relevant_events.append(
                        {
                            "location": location_name.title(),
                            "content": content,
                            "path": path,
                        }
                    )

            return relevant_events

        except Exception as e:
            logger.error(f"Failed to get location events for search: {e}")
            return []

__init__

__init__(memory_store)

Initialize location memento with memory store.

Source code in src/memoir/memento/location.py
def __init__(self, memory_store):
    """Create the location memento on top of the given memory store."""
    # The store is only held here; all reads/writes go through its async API.
    self.memory_store = memory_store

apply_location_events async

apply_location_events(location_events: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply location events to the memory store.

For same-location events, retrieves existing content and merges with new event.

Parameters:

Name Type Description Default
location_events list[dict[str, str]]

List of location events with location and description

required
metadata dict | None

Optional metadata to include with events

None
namespace str

Namespace to store location events in (default: "default")

'default'
Source code in src/memoir/memento/location.py
async def apply_location_events(
    self,
    location_events: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply location events to the memory store.

    For same-location events, retrieves existing content and merges with new event.

    Args:
        location_events: List of location events with location and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store location events in (default: "default")
    """
    # Fix: the debug message previously said "LocationManager"; this class
    # is LocationMemento.
    logger.debug(
        f"LocationMemento.apply_location_events called with {len(location_events) if location_events else 0} events"
    )
    if not location_events:
        logger.debug("No location events provided to apply_location_events")
        return

    for event in location_events:
        location_name = event.get("location", "")
        description = event.get("description", "")

        # Skip malformed events rather than failing the whole batch.
        if not location_name or not description:
            logger.warning(f"Invalid location event: {event}")
            continue

        # Normalize location name for consistent storage
        normalized_location = self._normalize_location_name(location_name)

        if not normalized_location:
            logger.debug(f"Invalid location name: {location_name}")
            continue

        # Create the location path
        location_path = f"location.{normalized_location}"

        try:
            await self._store_or_merge_location_event(
                location_path, description, metadata, namespace
            )
            logger.debug(f"Applied location event: {location_path} - {description}")
        except Exception as e:
            # One bad event must not abort the remaining events.
            logger.error(f"Failed to apply location event {location_path}: {e}")

get_location_summary async

get_location_summary(llm: Any | None = None, namespace: str = 'default') -> str

Generate a summary of all location events.

Parameters:

Name Type Description Default
llm Any | None

Optional LLM for generating narrative summaries

None
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
str

String summary of location events

Source code in src/memoir/memento/location.py
async def get_location_summary(
    self, llm: Any | None = None, namespace: str = "default"
) -> str:
    """
    Generate a summary of all location events.

    Args:
        llm: Optional LLM for generating narrative summaries
        namespace: Namespace to search for location data (default: "default")

    Returns:
        String summary of location events
    """
    try:
        # namespace parameter is passed to function

        # Search for all location events
        logger.debug(
            f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
        )
        all_items = await self.memory_store.asearch(namespace, "location.")
        logger.debug(f"Search returned {len(all_items)} items")

        # Debug: log what we found
        if all_items:
            logger.info(f"Found {len(all_items)} items with location. prefix")
            for item in all_items[:3]:  # Log first few items
                logger.info(f"Location item: {item}")
        else:
            logger.debug("No items found with location. prefix")

            # Debug: search for ANY items with location data
            # Fallback scan: walk the entire namespace looking for entries
            # tagged as location events but filed under other paths.
            logger.debug("Searching for ANY items with location data...")
            all_items_debug = await self.memory_store.asearch(namespace, "")
            location_items_debug = []
            for path, data in all_items_debug:
                if isinstance(data, dict) and (
                    data.get("memory_type") == "location_event"
                    or "location_name" in data.get("structured_data", {})
                ):
                    location_items_debug.append((path, data))
                    logger.debug(f"Found location data under path: {path}")

            if location_items_debug:
                logger.debug(
                    f"Found {len(location_items_debug)} location events but not under location.* paths!"
                )
                # Summarize the misfiled events rather than reporting none.
                return self._generate_structured_location_summary(
                    location_items_debug
                )
            else:
                logger.debug("No location events found anywhere in memory store!")

        location_items = all_items  # All items should already have location. prefix

        if not location_items:
            return "No location events available."

        # If no LLM provided, generate structured summary
        if not llm:
            return self._generate_structured_location_summary(location_items)

        # Generate LLM-based narrative summary
        return await self._generate_llm_location_summary(location_items, llm)

    except Exception as e:
        # Degrade to a fixed error string; details go to the log only.
        logger.error(f"Failed to generate location summary: {e}")
        logger.error(f"Exception details: {type(e).__name__}: {e!s}")
        import traceback

        logger.error(f"Traceback: {traceback.format_exc()}")
        return "Error generating location summary."
get_location_events_for_search(location_query: str, namespace: str = 'default') -> list[dict]

Get location events relevant to a search query.

Parameters:

Name Type Description Default
location_query str

Search query for locations

required
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
list[dict]

List of relevant location events

Source code in src/memoir/memento/location.py
async def get_location_events_for_search(
    self, location_query: str, namespace: str = "default"
) -> list[dict]:
    """
    Get location events relevant to a search query.

    Args:
        location_query: Search query for locations
        namespace: Namespace to search for location data (default: "default")

    Returns:
        List of relevant location events
    """
    try:
        # Pull everything stored under the location prefix.
        hits = await self.memory_store.asearch(namespace, "location.")
        needle = location_query.lower()

        matches = []
        for path, data in hits:
            # Defensive re-check of the prefix, as the search may be broad.
            if not path.startswith("location."):
                continue

            readable = path.split(".", 1)[1].replace("_", " ")
            text = (
                data.get("raw_text", str(data))
                if isinstance(data, dict)
                else str(data)
            )

            # Keep the event when the query appears in either the place
            # name or the stored event text (case-insensitive).
            if needle in readable.lower() or needle in text.lower():
                matches.append(
                    {
                        "location": readable.title(),
                        "content": text,
                        "path": path,
                    }
                )

        return matches

    except Exception as e:
        logger.error(f"Failed to get location events for search: {e}")
        return []

ProfileMemento

Manages user profile data and generates profile summaries.

Source code in src/memoir/memento/profile.py
class ProfileMemento:
    """Manages user profile data and generates profile summaries."""

    def __init__(self, memory_store):
        """Initialize profile memento with memory store."""
        self.memory_store = memory_store

    async def apply_profile_updates(
        self,
        profile_updates: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply profile updates to the memory store.

        Args:
            profile_updates: List of profile updates with path and value
            metadata: Optional metadata to include with updates
            namespace: Namespace to store the profile updates in (default: "default")
        """
        if not profile_updates:
            return

        for update in profile_updates:
            path = update.get("path", "")
            value = update.get("value", "")

            if not path or not value:
                logger.warning(f"Invalid profile update: {update}")
                continue

            # Check if this is a profile path
            if not path.startswith("profile."):
                logger.warning(f"Non-profile path in profile update: {path}")
                continue

            # Store the profile update as a memory with special handling
            memory_data = {
                "raw_text": value,
                "summary": f"Profile update: {path.split('.')[-1]} = {value}",
                "structured_data": {
                    "profile_field": path,
                    "profile_value": value,
                    "update_type": "profile_update",
                },
                "memory_type": "profile_update",
            }

            # Store directly using the async method (consistent with timeline manager)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied profile update: {path} = {value}")

    async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
        """
        Generate a comprehensive profile summary from stored profile data.

        Args:
            llm: Optional LLM for generating narrative summary
            namespace: Namespace to search for profile data (default: "default")

        Returns:
            Profile summary string
        """
        try:
            # Search for all profile memories using the correct method signature
            # Use provided namespace string as expected by asearch method
            profile_memories = await self.memory_store.asearch(namespace, "profile.")

            # Debug: log what we found
            logger.debug(f"Found {len(profile_memories)} profile memories")

            # Limit results manually if needed
            # NOTE(review): hard cap keeps summaries bounded; anything past
            # the first 1000 search hits is silently dropped.
            if len(profile_memories) > 1000:
                profile_memories = profile_memories[:1000]

            if not profile_memories:
                return "No profile information available."

            # Organize profile data by category
            profile_data = self._organize_profile_data(profile_memories)

            # Generate summary
            # Narrative summary when an LLM is supplied, structured text otherwise.
            if llm:
                return await self._generate_llm_summary(profile_data, llm)
            else:
                return self._generate_structured_summary(profile_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate profile summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            # Degrade to an error string instead of raising to the caller.
            return f"Error generating profile summary: {e}"

    def _organize_profile_data(
        self, profile_memories: list[tuple[str, Any]]
    ) -> dict[str, dict[str, str]]:
        """Organize profile memories into a structured hierarchy.

        Each ``profile_field`` dotted path (e.g. ``profile.personal.name``)
        becomes a nested dictionary path and values are coerced to strings.
        Entries that are not ``profile_update`` memories are skipped.

        NOTE(review): for paths with more than two segments the nesting is
        deeper than the return annotation suggests.
        """
        organized = {}

        for semantic_key, data in profile_memories:
            try:
                # Ensure semantic_key is a string
                if not isinstance(semantic_key, str):
                    logger.warning(
                        f"Non-string semantic key: {type(semantic_key)}: {semantic_key}"
                    )
                    semantic_key = str(semantic_key)

                # Handle the data format - it could be a MemoryItem dict or other format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        # This is a MemoryItem with content - extract the actual memory data
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        # This is the memory data directly
                        memory_data = data
                        structured_data = data.get("structured_data", {})
                else:
                    # If it's not a dict, try to extract meaningful data
                    logger.warning(
                        f"Unexpected data format for {semantic_key}: {type(data)}"
                    )
                    continue

                # Get the profile path and value
                profile_field = structured_data.get("profile_field")
                profile_value = structured_data.get("profile_value")
                update_type = structured_data.get("update_type")

                # Only process memories that are actual profile updates
                if update_type != "profile_update":
                    logger.debug(f"Skipping non-profile-update memory: {semantic_key}")
                    continue

                if not profile_field or not profile_value:
                    logger.warning(
                        f"Profile update memory missing field or value: {semantic_key}"
                    )
                    continue

                # Ensure profile_field is a string
                if not isinstance(profile_field, str):
                    logger.warning(
                        f"Non-string profile_field: {type(profile_field)}: {profile_field}"
                    )
                    profile_field = (
                        str(profile_field)
                        if profile_field is not None
                        else semantic_key
                    )

                if profile_field and profile_value:
                    # Convert profile_value to string if it's not already
                    if isinstance(profile_value, dict):
                        # If it's a dict, convert to JSON string
                        import json

                        profile_value_str = json.dumps(profile_value)
                    elif isinstance(profile_value, (list, tuple)):
                        # If it's a list/tuple, join as string
                        profile_value_str = ", ".join(str(x) for x in profile_value)
                    else:
                        profile_value_str = str(profile_value)

                    # Build nested dictionary structure
                    parts = profile_field.split(".")
                    current = organized

                    # Navigate to the correct nested position
                    for part in parts[:-1]:  # All except the last part
                        # Ensure part is a string
                        part = str(part) if part is not None else "unknown"
                        if part not in current:
                            current[part] = {}
                        current = current[part]

                    # Set the final value as string
                    final_key = str(parts[-1]) if parts[-1] is not None else "unknown"
                    current[final_key] = profile_value_str

            except Exception as e:
                # One malformed memory must not abort organizing the rest.
                logger.warning(f"Failed to process profile memory {semantic_key}: {e}")
                continue

        return organized

    def _generate_structured_summary(self, profile_data: dict[str, Any]) -> str:
        """Render the organized profile data as an indented text report.

        Known categories are emitted first in a fixed, human-friendly order;
        any remaining categories follow, with titles derived from their keys.
        """
        if not profile_data:
            return "No profile information available."

        # (key, display title) pairs, in the order they should appear.
        ordered_sections = [
            ("personal", "Personal Information"),
            ("professional", "Professional Profile"),
            ("health", "Health & Wellness"),
            ("finance", "Financial Profile"),
            ("living", "Living Situation"),
            ("relationships", "Relationships & Social"),
            ("goals", "Goals & Aspirations"),
        ]

        parts = ["=== USER PROFILE SUMMARY ===\n"]
        for section_key, section_title in ordered_sections:
            if section_key not in profile_data:
                continue
            parts.append(f"\n{section_title}:")
            parts.append(self._format_category_data(profile_data[section_key], indent=1))

        # Append any categories that were not covered by the fixed ordering.
        known = {section_key for section_key, _ in ordered_sections}
        for section_key, section_data in profile_data.items():
            if section_key in known:
                continue
            parts.append(f"\n{section_key.replace('_', ' ').title()}:")
            parts.append(self._format_category_data(section_data, indent=1))

        return "\n".join(parts)

    def _format_category_data(self, data: dict[str, Any], indent: int = 0) -> str:
        """Recursively render one category as indented "- Field: value" lines.

        Dict values become sub-category headings and recurse one level deeper;
        anything else is printed as a leaf "- Field: value" entry.
        """
        pad = "  " * indent
        if not data:
            return pad + "No information available"

        rendered = []
        for name, item in data.items():
            pretty = name.replace("_", " ").title()
            if isinstance(item, dict):
                # Sub-category: heading line, then the nested block.
                rendered.append(f"{pad}{pretty}:")
                rendered.append(self._format_category_data(item, indent + 1))
            else:
                rendered.append(f"{pad}- {pretty}: {item}")
        return "\n".join(rendered)

    async def _generate_llm_summary(self, profile_data: dict[str, Any], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert profile data to a readable format for LLM
            structured_summary = self._generate_structured_summary(profile_data)

            prompt = f"""Generate a comprehensive, narrative profile summary based on the following structured profile data. Create a natural, flowing description that captures the key aspects of this person's life, background, and characteristics.

Profile Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally
- Focus on the most important and defining characteristics
- Group related information together logically
- Keep it comprehensive but concise (2-3 paragraphs)
- Avoid simply listing facts - weave them into a story

Generate a professional profile summary:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER PROFILE SUMMARY ===\n\n{narrative_summary}\n\n--- Detailed Profile Data ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(profile_data)

__init__

__init__(memory_store)

Initialize profile memento with memory store.

Source code in src/memoir/memento/profile.py
def __init__(self, memory_store):
    """Initialize profile memento with memory store."""
    # All persistence goes through this store; no other state is kept.
    self.memory_store = memory_store

apply_profile_updates async

apply_profile_updates(profile_updates: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply profile updates to the memory store.

Parameters:

Name Type Description Default
profile_updates list[dict[str, str]]

List of profile updates with path and value

required
metadata dict | None

Optional metadata to include with updates

None
namespace str

Namespace to store the profile updates in (default: "default")

'default'
Source code in src/memoir/memento/profile.py
async def apply_profile_updates(
    self,
    profile_updates: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply profile updates to the memory store.

    Args:
        profile_updates: List of profile updates with path and value
        metadata: Optional metadata to include with updates
        namespace: Namespace to store the profile updates in (default: "default")
    """
    if not profile_updates:
        return

    for update in profile_updates:
        path = update.get("path", "")
        value = update.get("value", "")

        if not path or not value:
            logger.warning(f"Invalid profile update: {update}")
            continue

        # Check if this is a profile path
        if not path.startswith("profile."):
            logger.warning(f"Non-profile path in profile update: {path}")
            continue

        # Store the profile update as a memory with special handling
        memory_data = {
            "raw_text": value,
            "summary": f"Profile update: {path.split('.')[-1]} = {value}",
            "structured_data": {
                "profile_field": path,
                "profile_value": value,
                "update_type": "profile_update",
            },
            "memory_type": "profile_update",
        }

        # Store directly using the async method (consistent with timeline manager)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied profile update: {path} = {value}")

get_profile_summary async

get_profile_summary(llm=None, namespace: str = 'default') -> str

Generate a comprehensive profile summary from stored profile data.

Parameters:

Name Type Description Default
llm

Optional LLM for generating narrative summary

None
namespace str

Namespace to search for profile data (default: "default")

'default'

Returns:

Type Description
str

Profile summary string

Source code in src/memoir/memento/profile.py
async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
    """
    Generate a comprehensive profile summary from stored profile data.

    Args:
        llm: Optional LLM for generating narrative summary
        namespace: Namespace to search for profile data (default: "default")

    Returns:
        Profile summary string
    """
    try:
        # Every profile memory lives under a key prefixed with "profile.".
        memories = await self.memory_store.asearch(namespace, "profile.")
        logger.debug(f"Found {len(memories)} profile memories")

        # Cap the working set so a huge store cannot blow up summarization.
        if len(memories) > 1000:
            memories = memories[:1000]

        if not memories:
            return "No profile information available."

        organized = self._organize_profile_data(memories)

        # Prefer the narrative LLM summary when a model was supplied.
        if llm:
            return await self._generate_llm_summary(organized, llm)
        return self._generate_structured_summary(organized)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate profile summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating profile summary: {e}"

ProfileMemento

Manages user profile data and generates profile summaries.

Source code in src/memoir/memento/profile.py
class ProfileMemento:
    """Manages user profile data and generates profile summaries."""

    def __init__(self, memory_store):
        """Initialize profile memento with memory store."""
        self.memory_store = memory_store

    async def apply_profile_updates(
        self,
        profile_updates: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply profile updates to the memory store.

        Args:
            profile_updates: List of profile updates with path and value
            metadata: Optional metadata to include with updates
            namespace: Namespace to store the profile updates in (default: "default")
        """
        if not profile_updates:
            return

        for update in profile_updates:
            path = update.get("path", "")
            value = update.get("value", "")

            if not path or not value:
                logger.warning(f"Invalid profile update: {update}")
                continue

            # Check if this is a profile path
            if not path.startswith("profile."):
                logger.warning(f"Non-profile path in profile update: {path}")
                continue

            # Store the profile update as a memory with special handling
            memory_data = {
                "raw_text": value,
                "summary": f"Profile update: {path.split('.')[-1]} = {value}",
                "structured_data": {
                    "profile_field": path,
                    "profile_value": value,
                    "update_type": "profile_update",
                },
                "memory_type": "profile_update",
            }

            # Store directly using the async method (consistent with timeline manager)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied profile update: {path} = {value}")

    async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
        """
        Generate a comprehensive profile summary from stored profile data.

        Args:
            llm: Optional LLM for generating narrative summary
            namespace: Namespace to search for profile data (default: "default")

        Returns:
            Profile summary string
        """
        try:
            # Search for all profile memories using the correct method signature
            # Use provided namespace string as expected by asearch method
            profile_memories = await self.memory_store.asearch(namespace, "profile.")

            # Debug: log what we found
            logger.debug(f"Found {len(profile_memories)} profile memories")

            # Limit results manually if needed
            if len(profile_memories) > 1000:
                profile_memories = profile_memories[:1000]

            if not profile_memories:
                return "No profile information available."

            # Organize profile data by category
            profile_data = self._organize_profile_data(profile_memories)

            # Generate summary
            if llm:
                return await self._generate_llm_summary(profile_data, llm)
            else:
                return self._generate_structured_summary(profile_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate profile summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            return f"Error generating profile summary: {e}"

    def _organize_profile_data(
        self, profile_memories: list[tuple[str, Any]]
    ) -> dict[str, dict[str, str]]:
        """Organize profile memories into a structured hierarchy."""
        organized = {}

        for semantic_key, data in profile_memories:
            try:
                # Ensure semantic_key is a string
                if not isinstance(semantic_key, str):
                    logger.warning(
                        f"Non-string semantic key: {type(semantic_key)}: {semantic_key}"
                    )
                    semantic_key = str(semantic_key)

                # Handle the data format - it could be a MemoryItem dict or other format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        # This is a MemoryItem with content - extract the actual memory data
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        # This is the memory data directly
                        memory_data = data
                        structured_data = data.get("structured_data", {})
                else:
                    # If it's not a dict, try to extract meaningful data
                    logger.warning(
                        f"Unexpected data format for {semantic_key}: {type(data)}"
                    )
                    continue

                # Get the profile path and value
                profile_field = structured_data.get("profile_field")
                profile_value = structured_data.get("profile_value")
                update_type = structured_data.get("update_type")

                # Only process memories that are actual profile updates
                if update_type != "profile_update":
                    logger.debug(f"Skipping non-profile-update memory: {semantic_key}")
                    continue

                if not profile_field or not profile_value:
                    logger.warning(
                        f"Profile update memory missing field or value: {semantic_key}"
                    )
                    continue

                # Ensure profile_field is a string
                if not isinstance(profile_field, str):
                    logger.warning(
                        f"Non-string profile_field: {type(profile_field)}: {profile_field}"
                    )
                    profile_field = (
                        str(profile_field)
                        if profile_field is not None
                        else semantic_key
                    )

                if profile_field and profile_value:
                    # Convert profile_value to string if it's not already
                    if isinstance(profile_value, dict):
                        # If it's a dict, convert to JSON string
                        import json

                        profile_value_str = json.dumps(profile_value)
                    elif isinstance(profile_value, (list, tuple)):
                        # If it's a list/tuple, join as string
                        profile_value_str = ", ".join(str(x) for x in profile_value)
                    else:
                        profile_value_str = str(profile_value)

                    # Build nested dictionary structure
                    parts = profile_field.split(".")
                    current = organized

                    # Navigate to the correct nested position
                    for part in parts[:-1]:  # All except the last part
                        # Ensure part is a string
                        part = str(part) if part is not None else "unknown"
                        if part not in current:
                            current[part] = {}
                        current = current[part]

                    # Set the final value as string
                    final_key = str(parts[-1]) if parts[-1] is not None else "unknown"
                    current[final_key] = profile_value_str

            except Exception as e:
                logger.warning(f"Failed to process profile memory {semantic_key}: {e}")
                continue

        return organized

    def _generate_structured_summary(self, profile_data: dict[str, Any]) -> str:
        """Generate a structured text summary of profile data."""
        if not profile_data:
            return "No profile information available."

        summary_parts = ["=== USER PROFILE SUMMARY ===\n"]

        # Process each main category
        category_order = [
            ("personal", "Personal Information"),
            ("professional", "Professional Profile"),
            ("health", "Health & Wellness"),
            ("finance", "Financial Profile"),
            ("living", "Living Situation"),
            ("relationships", "Relationships & Social"),
            ("goals", "Goals & Aspirations"),
        ]

        for key, title in category_order:
            if key in profile_data:
                summary_parts.append(f"\n{title}:")
                summary_parts.append(
                    self._format_category_data(profile_data[key], indent=1)
                )

        # Add any other categories not in the standard order
        processed_keys = {key for key, _ in category_order}
        for key, data in profile_data.items():
            if key not in processed_keys:
                title = key.replace("_", " ").title()
                summary_parts.append(f"\n{title}:")
                summary_parts.append(self._format_category_data(data, indent=1))

        return "\n".join(summary_parts)

    def _format_category_data(self, data: dict[str, Any], indent: int = 0) -> str:
        """Format category data with proper indentation."""
        if not data:
            return "  " * indent + "No information available"

        lines = []
        prefix = "  " * indent

        for key, value in data.items():
            if isinstance(value, dict):
                # Nested category
                category_title = key.replace("_", " ").title()
                lines.append(f"{prefix}{category_title}:")
                lines.append(self._format_category_data(value, indent + 1))
            else:
                # Leaf value
                field_name = key.replace("_", " ").title()
                lines.append(f"{prefix}- {field_name}: {value}")

        return "\n".join(lines)

    async def _generate_llm_summary(self, profile_data: dict[str, Any], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert profile data to a readable format for LLM
            structured_summary = self._generate_structured_summary(profile_data)

            prompt = f"""Generate a comprehensive, narrative profile summary based on the following structured profile data. Create a natural, flowing description that captures the key aspects of this person's life, background, and characteristics.

Profile Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally
- Focus on the most important and defining characteristics
- Group related information together logically
- Keep it comprehensive but concise (2-3 paragraphs)
- Avoid simply listing facts - weave them into a story

Generate a professional profile summary:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER PROFILE SUMMARY ===\n\n{narrative_summary}\n\n--- Detailed Profile Data ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(profile_data)

__init__

__init__(memory_store)

Initialize profile memento with memory store.

Source code in src/memoir/memento/profile.py
def __init__(self, memory_store):
    """Initialize profile memento with memory store."""
    # All persistence goes through this store; no other state is kept.
    self.memory_store = memory_store

apply_profile_updates async

apply_profile_updates(profile_updates: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply profile updates to the memory store.

Parameters:

Name Type Description Default
profile_updates list[dict[str, str]]

List of profile updates with path and value

required
metadata dict | None

Optional metadata to include with updates

None
namespace str

Namespace to store the profile updates in (default: "default")

'default'
Source code in src/memoir/memento/profile.py
async def apply_profile_updates(
    self,
    profile_updates: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply profile updates to the memory store.

    Args:
        profile_updates: List of profile updates with path and value
        metadata: Optional metadata to include with updates
        namespace: Namespace to store the profile updates in (default: "default")
    """
    # Nothing to do for an empty batch.
    if not profile_updates:
        return

    # NOTE(review): `metadata` is accepted but never attached to the stored
    # memory below — confirm whether that is intentional.
    for update in profile_updates:
        path = update.get("path", "")
        value = update.get("value", "")

        if not path or not value:
            logger.warning(f"Invalid profile update: {update}")
            continue

        # Check if this is a profile path
        if not path.startswith("profile."):
            logger.warning(f"Non-profile path in profile update: {path}")
            continue

        # Store the profile update as a memory with special handling.
        # structured_data is the machine-readable pair that summary
        # generation reads back later.
        memory_data = {
            "raw_text": value,
            "summary": f"Profile update: {path.split('.')[-1]} = {value}",
            "structured_data": {
                "profile_field": path,
                "profile_value": value,
                "update_type": "profile_update",
            },
            "memory_type": "profile_update",
        }

        # Store directly using the async method (consistent with timeline manager)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied profile update: {path} = {value}")

get_profile_summary async

get_profile_summary(llm=None, namespace: str = 'default') -> str

Generate a comprehensive profile summary from stored profile data.

Parameters:

Name Type Description Default
llm

Optional LLM for generating narrative summary

None
namespace str

Namespace to search for profile data (default: "default")

'default'

Returns:

Type Description
str

Profile summary string

Source code in src/memoir/memento/profile.py
async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
    """
    Generate a comprehensive profile summary from stored profile data.

    Args:
        llm: Optional LLM for generating narrative summary
        namespace: Namespace to search for profile data (default: "default")

    Returns:
        Profile summary string (or an error message string on failure).
    """
    try:
        # Search for all profile memories using the correct method signature
        # Use provided namespace string as expected by asearch method
        profile_memories = await self.memory_store.asearch(namespace, "profile.")

        # Debug: log what we found
        logger.debug(f"Found {len(profile_memories)} profile memories")

        # Limit results manually if needed
        # (cap at 1000 memories to bound the size of the summary work)
        if len(profile_memories) > 1000:
            profile_memories = profile_memories[:1000]

        if not profile_memories:
            return "No profile information available."

        # Organize profile data by category
        profile_data = self._organize_profile_data(profile_memories)

        # Generate summary: narrative via LLM when one is supplied,
        # otherwise the plain structured report.
        if llm:
            return await self._generate_llm_summary(profile_data, llm)
        else:
            return self._generate_structured_summary(profile_data)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate profile summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating profile summary: {e}"

TimelineMemento

Manages user timeline data and generates chronological event summaries.

Source code in src/memoir/memento/timeline.py
class TimelineMemento:
    """Manages user timeline data and generates chronological event summaries."""

    def __init__(self, memory_store):
        """Initialize timeline memento with memory store."""
        self.memory_store = memory_store

    async def apply_timeline_events(
        self,
        timeline_events: list[dict[str, str]],
        metadata: dict | None = None,
        original_content: str | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply timeline events to the memory store.

        For same-day events, retrieves existing content and merges with new event.

        Args:
            timeline_events: List of timeline events with date and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store timeline events in (default: "default")
        """
        if not timeline_events:
            return

        for event in timeline_events:
            date_str = event.get("date", "")  # Format: YYYYMMDD
            description = event.get("description", "")

            if not date_str or not description:
                logger.warning(f"Invalid timeline event: {event}")
                continue

            # Validate date format
            if not self._validate_date_format(date_str):
                logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
                continue

            # Create the timeline path
            path = f"timeline.{date_str}"

            # Check if there's already an event for this date
            existing_events = await self.memory_store.asearch(namespace, path)

            if existing_events:
                # Merge with existing event(s) for the same day
                existing_content = self._extract_existing_content(existing_events)
                merged_content = self._merge_events(existing_content, description)
            else:
                merged_content = description

            # Store the timeline event as a memory
            memory_data = {
                "raw_text": merged_content,
                "original_content": original_content
                or merged_content,  # Store original input if available
                "summary": f"Timeline event on {self._format_date_display(date_str)}",
                "structured_data": {
                    "timeline_date": date_str,
                    "timeline_content": merged_content,
                    "original_content": original_content or merged_content,
                    "update_type": "timeline_event",
                },
                "memory_type": "timeline_event",
            }

            logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

            # Store directly using the memory store with correct signature (async)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

    async def get_timeline_summary(
        self,
        start_date: str | None = None,
        end_date: str | None = None,
        llm=None,
        namespace: str = "default",
    ) -> str:
        """
        Generate a comprehensive timeline summary from stored timeline data.

        Args:
            start_date: Optional start date (YYYYMMDD format)
            end_date: Optional end date (YYYYMMDD format)
            llm: Optional LLM for generating narrative summary
            namespace: Namespace to search timeline memories in (default: "default")

        Returns:
            Timeline summary string (or an error message string on failure)
        """
        try:
            # Search for all timeline memories
            timeline_memories = await self.memory_store.asearch(namespace, "timeline.")

            # Debug: log what we found
            logger.debug(f"Found {len(timeline_memories)} timeline memories")

            # Filter by date range if specified
            if start_date or end_date:
                timeline_memories = self._filter_by_date_range(
                    timeline_memories, start_date, end_date
                )

            # Limit results if too many
            # (cap at 1000 memories to bound the size of the summary work)
            if len(timeline_memories) > 1000:
                timeline_memories = timeline_memories[:1000]

            if not timeline_memories:
                return "No timeline events available."

            # Organize timeline data chronologically
            timeline_data = self._organize_timeline_data(timeline_memories)

            # Generate summary: narrative via LLM when one is supplied,
            # otherwise the plain structured report.
            if llm:
                return await self._generate_llm_summary(timeline_data, llm)
            else:
                return self._generate_structured_summary(timeline_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate timeline summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            return f"Error generating timeline summary: {e}"

    def _validate_date_format(self, date_str: str) -> bool:
        """Return True iff *date_str* is a real calendar date in YYYYMMDD form."""
        if len(date_str) != 8:
            return False
        try:
            # strptime rejects impossible dates (e.g. month 13) for us.
            datetime.strptime(date_str, "%Y%m%d")
        except ValueError:
            return False
        return True

    def _format_date_display(self, date_str: str) -> str:
        """Convert YYYYMMDD to e.g. "January 15, 2024"; echo input if unparseable."""
        try:
            return datetime.strptime(date_str, "%Y%m%d").strftime("%B %d, %Y")
        except ValueError:
            # Fall back to the raw value rather than failing the caller.
            return date_str

    def _extract_existing_content(self, existing_events: list[tuple[str, Any]]) -> str:
        """Collect timeline_content strings from stored events, joined by " | ".

        Handles both MemoryItem wrappers (real data nested under "content")
        and bare memory dicts; non-dict payloads are skipped.
        """
        pieces = []
        for _key, payload in existing_events:
            if not isinstance(payload, dict):
                continue
            # MemoryItem wrappers nest the real memory under "content".
            if isinstance(payload.get("content"), dict):
                inner = payload["content"]
            else:
                inner = payload
            text = inner.get("structured_data", {}).get("timeline_content", "")
            if text:
                pieces.append(text)
        return " | ".join(pieces)

    def _merge_events(self, existing_content: str, new_content: str) -> str:
        """Combine same-day events; currently a plain " | " concatenation.

        In production an LLM could produce a nicer merged summary.
        """
        if existing_content:
            return f"{existing_content} | {new_content}"
        return new_content

    def _filter_by_date_range(
        self,
        memories: list[tuple[str, Any]],
        start_date: str | None,
        end_date: str | None,
    ) -> list[tuple[str, Any]]:
        """Filter timeline memories by date range."""
        filtered = []

        for semantic_key, data in memories:
            # Extract date from key (timeline.YYYYMMDD)
            if "." in semantic_key:
                date_str = semantic_key.split(".")[-1]
                if self._validate_date_format(date_str):
                    # Check if within range
                    if start_date and date_str < start_date:
                        continue
                    if end_date and date_str > end_date:
                        continue
                    filtered.append((semantic_key, data))

        return filtered

    def _organize_timeline_data(
        self, timeline_memories: list[tuple[str, Any]]
    ) -> dict[str, str]:
        """Organize timeline memories into a chronological structure."""
        organized = {}

        for semantic_key, data in timeline_memories:
            try:
                # Extract date from key
                if "." not in semantic_key:
                    continue

                date_str = semantic_key.split(".")[-1]
                if not self._validate_date_format(date_str):
                    continue

                # Handle the data format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        memory_data = data
                        structured_data = data.get("structured_data", {})

                    # Get the timeline content
                    timeline_content = structured_data.get("timeline_content")
                    update_type = structured_data.get("update_type")

                    # Only process memories that are actual timeline events
                    if update_type != "timeline_event":
                        logger.debug(
                            f"Skipping non-timeline-event memory: {semantic_key}"
                        )
                        continue

                    if timeline_content:
                        organized[date_str] = timeline_content

            except Exception as e:
                logger.warning(f"Failed to process timeline memory {semantic_key}: {e}")
                continue

        # Sort by date
        sorted_dates = sorted(organized.keys())
        return {date: organized[date] for date in sorted_dates}

    def _generate_structured_summary(self, timeline_data: dict[str, str]) -> str:
        """Generate a structured text summary of timeline data."""
        if not timeline_data:
            return "No timeline events available."

        summary_parts = ["=== USER TIMELINE ===\n"]

        # Group by year and month for better organization
        events_by_year = {}
        for date_str, content in timeline_data.items():
            year = date_str[:4]
            month = date_str[4:6]

            if year not in events_by_year:
                events_by_year[year] = {}
            if month not in events_by_year[year]:
                events_by_year[year][month] = []

            events_by_year[year][month].append((date_str, content))

        # Generate summary by year and month
        for year in sorted(events_by_year.keys(), reverse=True):
            summary_parts.append(f"\n{year}:")

            for month in sorted(events_by_year[year].keys(), reverse=True):
                month_name = datetime.strptime(f"{year}{month}01", "%Y%m%d").strftime(
                    "%B"
                )
                summary_parts.append(f"\n  {month_name}:")

                for date_str, content in sorted(
                    events_by_year[year][month], reverse=True
                ):
                    day = int(date_str[6:8])
                    summary_parts.append(f"    {day:2d}: {content}")

        return "\n".join(summary_parts)

    async def _generate_llm_summary(self, timeline_data: dict[str, str], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert timeline data to a readable format for LLM
            structured_summary = self._generate_structured_summary(timeline_data)

            prompt = f"""Generate a comprehensive, narrative timeline summary based on the following chronological events. Create a natural, flowing description that captures the key events and their significance in the person's life.

Timeline Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally through time
- Highlight significant events and patterns
- Group related events logically
- Keep it comprehensive but concise
- Focus on the progression and development over time

Generate a timeline narrative:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER TIMELINE ===\n\n{narrative_summary}\n\n--- Detailed Timeline ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(timeline_data)

__init__

__init__(memory_store)

Initialize timeline memento with memory store.

Source code in src/memoir/memento/timeline.py
def __init__(self, memory_store):
    """Initialize timeline memento with memory store."""
    self.memory_store = memory_store

apply_timeline_events async

apply_timeline_events(timeline_events: list[dict[str, str]], metadata: dict | None = None, original_content: str | None = None, namespace: str = 'default') -> None

Apply timeline events to the memory store.

For same-day events, retrieves existing content and merges with new event.

Parameters:

Name Type Description Default
timeline_events list[dict[str, str]]

List of timeline events with date and description

required
metadata dict | None

Optional metadata to include with events

None
original_content str | None

Optional original input text preserved alongside the merged event content

None
namespace str

Namespace to store timeline events in (default: "default")

'default'
Source code in src/memoir/memento/timeline.py
async def apply_timeline_events(
    self,
    timeline_events: list[dict[str, str]],
    metadata: dict | None = None,
    original_content: str | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply timeline events to the memory store.

    For same-day events, retrieves existing content and merges with new event.

    Args:
        timeline_events: List of timeline events with date and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store timeline events in (default: "default")
    """
    if not timeline_events:
        return

    for event in timeline_events:
        date_str = event.get("date", "")  # Format: YYYYMMDD
        description = event.get("description", "")

        if not date_str or not description:
            logger.warning(f"Invalid timeline event: {event}")
            continue

        # Validate date format
        if not self._validate_date_format(date_str):
            logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
            continue

        # Create the timeline path
        path = f"timeline.{date_str}"

        # Check if there's already an event for this date
        existing_events = await self.memory_store.asearch(namespace, path)

        if existing_events:
            # Merge with existing event(s) for the same day
            existing_content = self._extract_existing_content(existing_events)
            merged_content = self._merge_events(existing_content, description)
        else:
            merged_content = description

        # Store the timeline event as a memory
        memory_data = {
            "raw_text": merged_content,
            "original_content": original_content
            or merged_content,  # Store original input if available
            "summary": f"Timeline event on {self._format_date_display(date_str)}",
            "structured_data": {
                "timeline_date": date_str,
                "timeline_content": merged_content,
                "original_content": original_content or merged_content,
                "update_type": "timeline_event",
            },
            "memory_type": "timeline_event",
        }

        logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

        # Store directly using the memory store with correct signature (async)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

get_timeline_summary async

get_timeline_summary(start_date: str | None = None, end_date: str | None = None, llm=None, namespace: str = 'default') -> str

Generate a comprehensive timeline summary from stored timeline data.

Parameters:

Name Type Description Default
start_date str | None

Optional start date (YYYYMMDD format)

None
end_date str | None

Optional end date (YYYYMMDD format)

None
llm

Optional LLM for generating narrative summary

None

Returns:

Type Description
str

Timeline summary string

Source code in src/memoir/memento/timeline.py
async def get_timeline_summary(
    self,
    start_date: str | None = None,
    end_date: str | None = None,
    llm=None,
    namespace: str = "default",
) -> str:
    """
    Generate a comprehensive timeline summary from stored timeline data.

    Args:
        start_date: Optional start date (YYYYMMDD format)
        end_date: Optional end date (YYYYMMDD format)
        llm: Optional LLM for generating narrative summary

    Returns:
        Timeline summary string
    """
    try:
        # Search for all timeline memories
        timeline_memories = await self.memory_store.asearch(namespace, "timeline.")

        # Debug: log what we found
        logger.debug(f"Found {len(timeline_memories)} timeline memories")

        # Filter by date range if specified
        if start_date or end_date:
            timeline_memories = self._filter_by_date_range(
                timeline_memories, start_date, end_date
            )

        # Limit results if too many
        if len(timeline_memories) > 1000:
            timeline_memories = timeline_memories[:1000]

        if not timeline_memories:
            return "No timeline events available."

        # Organize timeline data chronologically
        timeline_data = self._organize_timeline_data(timeline_memories)

        # Generate summary
        if llm:
            return await self._generate_llm_summary(timeline_data, llm)
        else:
            return self._generate_structured_summary(timeline_data)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate timeline summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating timeline summary: {e}"

Timeline

Manages user timeline data and generates chronological event summaries.

Source code in src/memoir/memento/timeline.py
class TimelineMemento:
    """Manages user timeline data and generates chronological event summaries."""

    def __init__(self, memory_store):
        """Initialize timeline memento with memory store."""
        self.memory_store = memory_store

    async def apply_timeline_events(
        self,
        timeline_events: list[dict[str, str]],
        metadata: dict | None = None,
        original_content: str | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply timeline events to the memory store.

        For same-day events, retrieves existing content and merges with new event.

        Args:
            timeline_events: List of timeline events with date and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store timeline events in (default: "default")
        """
        if not timeline_events:
            return

        for event in timeline_events:
            date_str = event.get("date", "")  # Format: YYYYMMDD
            description = event.get("description", "")

            if not date_str or not description:
                logger.warning(f"Invalid timeline event: {event}")
                continue

            # Validate date format
            if not self._validate_date_format(date_str):
                logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
                continue

            # Create the timeline path
            path = f"timeline.{date_str}"

            # Check if there's already an event for this date
            existing_events = await self.memory_store.asearch(namespace, path)

            if existing_events:
                # Merge with existing event(s) for the same day
                existing_content = self._extract_existing_content(existing_events)
                merged_content = self._merge_events(existing_content, description)
            else:
                merged_content = description

            # Store the timeline event as a memory
            memory_data = {
                "raw_text": merged_content,
                "original_content": original_content
                or merged_content,  # Store original input if available
                "summary": f"Timeline event on {self._format_date_display(date_str)}",
                "structured_data": {
                    "timeline_date": date_str,
                    "timeline_content": merged_content,
                    "original_content": original_content or merged_content,
                    "update_type": "timeline_event",
                },
                "memory_type": "timeline_event",
            }

            logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

            # Store directly using the memory store with correct signature (async)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

    async def get_timeline_summary(
        self,
        start_date: str | None = None,
        end_date: str | None = None,
        llm=None,
        namespace: str = "default",
    ) -> str:
        """
        Generate a comprehensive timeline summary from stored timeline data.

        Args:
            start_date: Optional start date (YYYYMMDD format)
            end_date: Optional end date (YYYYMMDD format)
            llm: Optional LLM for generating narrative summary

        Returns:
            Timeline summary string
        """
        try:
            # Search for all timeline memories
            timeline_memories = await self.memory_store.asearch(namespace, "timeline.")

            # Debug: log what we found
            logger.debug(f"Found {len(timeline_memories)} timeline memories")

            # Filter by date range if specified
            if start_date or end_date:
                timeline_memories = self._filter_by_date_range(
                    timeline_memories, start_date, end_date
                )

            # Limit results if too many
            if len(timeline_memories) > 1000:
                timeline_memories = timeline_memories[:1000]

            if not timeline_memories:
                return "No timeline events available."

            # Organize timeline data chronologically
            timeline_data = self._organize_timeline_data(timeline_memories)

            # Generate summary
            if llm:
                return await self._generate_llm_summary(timeline_data, llm)
            else:
                return self._generate_structured_summary(timeline_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate timeline summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            return f"Error generating timeline summary: {e}"

    def _validate_date_format(self, date_str: str) -> bool:
        """Validate that date string is in YYYYMMDD format."""
        if len(date_str) != 8:
            return False
        try:
            datetime.strptime(date_str, "%Y%m%d")
            return True
        except ValueError:
            return False

    def _format_date_display(self, date_str: str) -> str:
        """Format YYYYMMDD to human-readable date."""
        try:
            dt = datetime.strptime(date_str, "%Y%m%d")
            return dt.strftime("%B %d, %Y")
        except ValueError:
            return date_str

    def _extract_existing_content(self, existing_events: list[tuple[str, Any]]) -> str:
        """Extract content from existing timeline events."""
        contents = []
        for _, data in existing_events:
            if isinstance(data, dict):
                # Check if this is a MemoryItem structure with content field
                if "content" in data and isinstance(data["content"], dict):
                    memory_data = data["content"]
                    structured_data = memory_data.get("structured_data", {})
                    timeline_content = structured_data.get("timeline_content", "")
                    if timeline_content:
                        contents.append(timeline_content)
                else:
                    # Try direct access
                    structured_data = data.get("structured_data", {})
                    timeline_content = structured_data.get("timeline_content", "")
                    if timeline_content:
                        contents.append(timeline_content)

        return " | ".join(contents) if contents else ""

    def _merge_events(self, existing_content: str, new_content: str) -> str:
        """Merge existing and new events for the same day."""
        if not existing_content:
            return new_content

        # Simple merge strategy - combine with separator
        # In production, you might want to use an LLM to create a better summary
        return f"{existing_content} | {new_content}"

    def _filter_by_date_range(
        self,
        memories: list[tuple[str, Any]],
        start_date: str | None,
        end_date: str | None,
    ) -> list[tuple[str, Any]]:
        """Filter timeline memories by date range."""
        filtered = []

        for semantic_key, data in memories:
            # Extract date from key (timeline.YYYYMMDD)
            if "." in semantic_key:
                date_str = semantic_key.split(".")[-1]
                if self._validate_date_format(date_str):
                    # Check if within range
                    if start_date and date_str < start_date:
                        continue
                    if end_date and date_str > end_date:
                        continue
                    filtered.append((semantic_key, data))

        return filtered

    def _organize_timeline_data(
        self, timeline_memories: list[tuple[str, Any]]
    ) -> dict[str, str]:
        """Organize timeline memories into a chronological structure."""
        organized = {}

        for semantic_key, data in timeline_memories:
            try:
                # Extract date from key
                if "." not in semantic_key:
                    continue

                date_str = semantic_key.split(".")[-1]
                if not self._validate_date_format(date_str):
                    continue

                # Handle the data format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        memory_data = data
                        structured_data = data.get("structured_data", {})

                    # Get the timeline content
                    timeline_content = structured_data.get("timeline_content")
                    update_type = structured_data.get("update_type")

                    # Only process memories that are actual timeline events
                    if update_type != "timeline_event":
                        logger.debug(
                            f"Skipping non-timeline-event memory: {semantic_key}"
                        )
                        continue

                    if timeline_content:
                        organized[date_str] = timeline_content

            except Exception as e:
                logger.warning(f"Failed to process timeline memory {semantic_key}: {e}")
                continue

        # Sort by date
        sorted_dates = sorted(organized.keys())
        return {date: organized[date] for date in sorted_dates}

    def _generate_structured_summary(self, timeline_data: dict[str, str]) -> str:
        """Generate a structured text summary of timeline data."""
        if not timeline_data:
            return "No timeline events available."

        summary_parts = ["=== USER TIMELINE ===\n"]

        # Group by year and month for better organization
        events_by_year = {}
        for date_str, content in timeline_data.items():
            year = date_str[:4]
            month = date_str[4:6]

            if year not in events_by_year:
                events_by_year[year] = {}
            if month not in events_by_year[year]:
                events_by_year[year][month] = []

            events_by_year[year][month].append((date_str, content))

        # Generate summary by year and month
        for year in sorted(events_by_year.keys(), reverse=True):
            summary_parts.append(f"\n{year}:")

            for month in sorted(events_by_year[year].keys(), reverse=True):
                month_name = datetime.strptime(f"{year}{month}01", "%Y%m%d").strftime(
                    "%B"
                )
                summary_parts.append(f"\n  {month_name}:")

                for date_str, content in sorted(
                    events_by_year[year][month], reverse=True
                ):
                    day = int(date_str[6:8])
                    summary_parts.append(f"    {day:2d}: {content}")

        return "\n".join(summary_parts)

    async def _generate_llm_summary(self, timeline_data: dict[str, str], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert timeline data to a readable format for LLM
            structured_summary = self._generate_structured_summary(timeline_data)

            prompt = f"""Generate a comprehensive, narrative timeline summary based on the following chronological events. Create a natural, flowing description that captures the key events and their significance in the person's life.

Timeline Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally through time
- Highlight significant events and patterns
- Group related events logically
- Keep it comprehensive but concise
- Focus on the progression and development over time

Generate a timeline narrative:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER TIMELINE ===\n\n{narrative_summary}\n\n--- Detailed Timeline ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(timeline_data)

__init__

__init__(memory_store)

Initialize timeline memento with memory store.

Source code in src/memoir/memento/timeline.py
def __init__(self, memory_store):
    """Initialize timeline memento with memory store."""
    self.memory_store = memory_store

apply_timeline_events async

apply_timeline_events(timeline_events: list[dict[str, str]], metadata: dict | None = None, original_content: str | None = None, namespace: str = 'default') -> None

Apply timeline events to the memory store.

For same-day events, retrieves existing content and merges with new event.

Parameters:

Name Type Description Default
timeline_events list[dict[str, str]]

List of timeline events with date and description

required
metadata dict | None

Optional metadata to include with events

None
original_content str | None

Optional original input text preserved alongside the merged event content

None
namespace str

Namespace to store timeline events in (default: "default")

'default'
Source code in src/memoir/memento/timeline.py
async def apply_timeline_events(
    self,
    timeline_events: list[dict[str, str]],
    metadata: dict | None = None,
    original_content: str | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply timeline events to the memory store.

    For same-day events, retrieves existing content and merges with new event.

    Args:
        timeline_events: List of timeline events with date and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store timeline events in (default: "default")
    """
    if not timeline_events:
        return

    for event in timeline_events:
        date_str = event.get("date", "")  # Format: YYYYMMDD
        description = event.get("description", "")

        if not date_str or not description:
            logger.warning(f"Invalid timeline event: {event}")
            continue

        # Validate date format
        if not self._validate_date_format(date_str):
            logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
            continue

        # Create the timeline path
        path = f"timeline.{date_str}"

        # Check if there's already an event for this date
        existing_events = await self.memory_store.asearch(namespace, path)

        if existing_events:
            # Merge with existing event(s) for the same day
            existing_content = self._extract_existing_content(existing_events)
            merged_content = self._merge_events(existing_content, description)
        else:
            merged_content = description

        # Store the timeline event as a memory
        memory_data = {
            "raw_text": merged_content,
            "original_content": original_content
            or merged_content,  # Store original input if available
            "summary": f"Timeline event on {self._format_date_display(date_str)}",
            "structured_data": {
                "timeline_date": date_str,
                "timeline_content": merged_content,
                "original_content": original_content or merged_content,
                "update_type": "timeline_event",
            },
            "memory_type": "timeline_event",
        }

        logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

        # Store directly using the memory store with correct signature (async)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

get_timeline_summary async

get_timeline_summary(start_date: str | None = None, end_date: str | None = None, llm=None, namespace: str = 'default') -> str

Generate a comprehensive timeline summary from stored timeline data.

Parameters:

Name Type Description Default
start_date str | None

Optional start date (YYYYMMDD format)

None
end_date str | None

Optional end date (YYYYMMDD format)

None
llm

Optional LLM for generating narrative summary

None

Returns:

Type Description
str

Timeline summary string

Source code in src/memoir/memento/timeline.py
async def get_timeline_summary(
    self,
    start_date: str | None = None,
    end_date: str | None = None,
    llm=None,
    namespace: str = "default",
) -> str:
    """
    Generate a comprehensive timeline summary from stored timeline data.

    Args:
        start_date: Optional start date (YYYYMMDD format)
        end_date: Optional end date (YYYYMMDD format)
        llm: Optional LLM for generating narrative summary

    Returns:
        Timeline summary string
    """
    try:
        # Search for all timeline memories
        timeline_memories = await self.memory_store.asearch(namespace, "timeline.")

        # Debug: log what we found
        logger.debug(f"Found {len(timeline_memories)} timeline memories")

        # Filter by date range if specified
        if start_date or end_date:
            timeline_memories = self._filter_by_date_range(
                timeline_memories, start_date, end_date
            )

        # Limit results if too many
        if len(timeline_memories) > 1000:
            timeline_memories = timeline_memories[:1000]

        if not timeline_memories:
            return "No timeline events available."

        # Organize timeline data chronologically
        timeline_data = self._organize_timeline_data(timeline_memories)

        # Generate summary
        if llm:
            return await self._generate_llm_summary(timeline_data, llm)
        else:
            return self._generate_structured_summary(timeline_data)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate timeline summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating timeline summary: {e}"

Submodules

memoir.memento.location module

memoir.memento.location

Location Memento for Spatial Memory Management and Geographic Event Storage.

Handles location-based event storage, geographic organization, and location summaries. Events are stored under location.{location_name} keys with automatic merging of same-location events.

LocationMemento

Manages user location data and generates geographic event summaries.

Source code in src/memoir/memento/location.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class LocationMemento:
    """Manages user location data and generates geographic event summaries."""

    def __init__(self, memory_store):
        """Initialize location memento with memory store."""
        self.memory_store = memory_store

    async def apply_location_events(
        self,
        location_events: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply location events to the memory store.

        For same-location events, retrieves existing content and merges with new event.

        Args:
            location_events: List of location events with location and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store location events in (default: "default")
        """
        logger.debug(
            f"LocationManager.apply_location_events called with {len(location_events) if location_events else 0} events"
        )
        if not location_events:
            logger.debug("No location events provided to apply_location_events")
            return

        for event in location_events:
            location_name = event.get("location", "")
            description = event.get("description", "")

            if not location_name or not description:
                logger.warning(f"Invalid location event: {event}")
                continue

            # Normalize location name for consistent storage
            normalized_location = self._normalize_location_name(location_name)

            if not normalized_location:
                logger.debug(f"Invalid location name: {location_name}")
                continue

            # Create the location path
            location_path = f"location.{normalized_location}"

            try:
                await self._store_or_merge_location_event(
                    location_path, description, metadata, namespace
                )
                logger.debug(f"Applied location event: {location_path} - {description}")
            except Exception as e:
                logger.error(f"Failed to apply location event {location_path}: {e}")

    def _normalize_location_name(self, location_name: str) -> str:
        """
        Normalize location name for consistent storage.

        Args:
            location_name: Raw location name from LLM

        Returns:
            Normalized location name suitable for path storage
        """
        if not location_name or not isinstance(location_name, str):
            return ""

        # Clean and normalize the location name
        # Remove extra whitespace and convert to lowercase
        normalized = location_name.strip().lower()

        # Replace spaces and special characters with underscores
        normalized = re.sub(
            r"[^\w\s-]", "", normalized
        )  # Remove special chars except spaces and hyphens
        normalized = re.sub(
            r"[\s-]+", "_", normalized
        )  # Replace spaces/hyphens with underscores
        normalized = re.sub(r"_+", "_", normalized)  # Collapse multiple underscores
        normalized = normalized.strip("_")  # Remove leading/trailing underscores

        # Handle common location patterns and abbreviations
        location_mappings = {
            "new_york_city": "new_york_city",
            "nyc": "new_york_city",
            "ny": "new_york",
            "california": "california",
            "ca": "california",
            "san_francisco": "san_francisco",
            "sf": "san_francisco",
            "los_angeles": "los_angeles",
            "la": "los_angeles",
            "united_states": "united_states",
            "usa": "united_states",
            "us": "united_states",
        }

        # Apply mappings if available
        if normalized in location_mappings:
            normalized = location_mappings[normalized]

        # Ensure minimum length and validity
        if len(normalized) < 2:
            return ""

        return normalized

    async def _store_or_merge_location_event(
        self,
        location_path: str,
        description: str,
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Store location event or merge with existing location events.

        Args:
            location_path: Storage path for the location (e.g., "location.san_francisco")
            description: Event description
            metadata: Optional metadata
            namespace: Namespace to store location data in (default: "default")
        """
        # namespace parameter is passed to function

        # Check if location already has events
        existing_items = await self.memory_store.asearch(namespace, location_path)

        if existing_items:
            # Merge with existing location events
            _, existing_data = existing_items[0]

            if isinstance(existing_data, str):
                existing_content = existing_data
            elif isinstance(existing_data, dict):
                existing_content = existing_data.get("raw_text", "")
            else:
                existing_content = str(existing_data)

            # Merge descriptions, avoiding duplicates
            merged_content = self._merge_location_descriptions(
                existing_content, description
            )

            content = {
                "raw_text": merged_content,
                "summary": f"Location events at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": merged_content,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }
        else:
            # Create new location event
            content = {
                "raw_text": description,
                "summary": f"Location event at {location_path.split('.')[1].replace('_', ' ').title()}",
                "structured_data": {
                    "location_name": location_path.split(".")[1]
                    .replace("_", " ")
                    .title(),
                    "location_content": description,
                    "update_type": "location_event",
                },
                "memory_type": "location_event",
            }

        # Include metadata if provided
        if metadata:
            content["metadata"] = metadata

        # Store the location event
        logger.debug(
            f"About to call store_memory_async with namespace='{namespace}', path='{location_path}'"
        )
        logger.debug(f"Content to store: {content}")

        result = await self.memory_store.store_memory_async(
            namespace, content, location_path
        )
        logger.debug(f"store_memory_async returned: {result}")

        # Debug: immediately test if we can find what we just stored
        try:
            test_search = await self.memory_store.asearch(namespace, location_path)
            logger.debug(
                f"Immediate search for '{location_path}' found {len(test_search)} items"
            )
            if test_search:
                logger.debug(f"Found item: {test_search[0]}")

            # Also try searching with prefix
            prefix_search = await self.memory_store.asearch(namespace, "location.")
            logger.debug(
                f"Prefix search for 'location.' found {len(prefix_search)} items"
            )

        except Exception as e:
            logger.debug(f"Immediate search test failed: {e}")

    def _merge_location_descriptions(self, existing: str, new: str) -> str:
        """
        Merge location event descriptions, avoiding duplicates.

        Args:
            existing: Existing location event descriptions
            new: New location event description

        Returns:
            Merged location descriptions
        """
        if not existing:
            return new

        if not new:
            return existing

        # Split by common delimiters
        existing_events = [
            event.strip() for event in existing.split("|") if event.strip()
        ]

        # Check if new event is already present (fuzzy matching)
        new_lower = new.lower()
        for existing_event in existing_events:
            if existing_event.lower() == new_lower:
                return existing  # Duplicate, return existing

        # Add new event
        existing_events.append(new.strip())
        return " | ".join(existing_events)

    async def get_location_summary(
        self, llm: Any | None = None, namespace: str = "default"
    ) -> str:
        """
        Generate a summary of all location events.

        Args:
            llm: Optional LLM for generating narrative summaries
            namespace: Namespace to search for location data (default: "default")

        Returns:
            String summary of location events
        """
        try:
            # namespace parameter is passed to function

            # Search for all location events
            logger.debug(
                f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
            )
            all_items = await self.memory_store.asearch(namespace, "location.")
            logger.debug(f"Search returned {len(all_items)} items")

            # Debug: log what we found
            if all_items:
                logger.info(f"Found {len(all_items)} items with location. prefix")
                for item in all_items[:3]:  # Log first few items
                    logger.info(f"Location item: {item}")
            else:
                logger.debug("No items found with location. prefix")

                # Debug: search for ANY items with location data
                logger.debug("Searching for ANY items with location data...")
                all_items_debug = await self.memory_store.asearch(namespace, "")
                location_items_debug = []
                for path, data in all_items_debug:
                    if isinstance(data, dict) and (
                        data.get("memory_type") == "location_event"
                        or "location_name" in data.get("structured_data", {})
                    ):
                        location_items_debug.append((path, data))
                        logger.debug(f"Found location data under path: {path}")

                if location_items_debug:
                    logger.debug(
                        f"Found {len(location_items_debug)} location events but not under location.* paths!"
                    )
                    return self._generate_structured_location_summary(
                        location_items_debug
                    )
                else:
                    logger.debug("No location events found anywhere in memory store!")

            location_items = all_items  # All items should already have location. prefix

            if not location_items:
                return "No location events available."

            # If no LLM provided, generate structured summary
            if not llm:
                return self._generate_structured_location_summary(location_items)

            # Generate LLM-based narrative summary
            return await self._generate_llm_location_summary(location_items, llm)

        except Exception as e:
            logger.error(f"Failed to generate location summary: {e}")
            logger.error(f"Exception details: {type(e).__name__}: {e!s}")
            import traceback

            logger.error(f"Traceback: {traceback.format_exc()}")
            return "Error generating location summary."

    def _generate_structured_location_summary(self, location_items: list) -> str:
        """Generate a structured location summary without LLM."""
        summary_lines = ["=== USER LOCATION SUMMARY ===", ""]

        # Group and sort locations
        locations = {}
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            # Handle nested memory object structure from asearch results
            if isinstance(data, dict):
                # Check if this is a nested memory object with 'content' field
                if "content" in data and isinstance(data["content"], dict):
                    # Extract from nested structure: data['content']['raw_text']
                    content = data["content"].get("raw_text", str(data))
                else:
                    # Direct structure: data['raw_text']
                    content = data.get("raw_text", str(data))
            else:
                content = str(data)

            locations[location_name] = content

        # Sort locations alphabetically
        for location_name in sorted(locations.keys()):
            content = locations[location_name]
            summary_lines.append(f"{location_name}:")

            # Split multiple events and format nicely
            events = content.split(" | ")
            for event in events:
                if event.strip():
                    summary_lines.append(f"  - {event.strip()}")
            summary_lines.append("")

        return "\n".join(summary_lines)

    async def _generate_llm_location_summary(
        self, location_items: list, llm: Any
    ) -> str:
        """Generate an LLM-based narrative location summary."""
        # Prepare location data for LLM
        location_data = []
        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ").title()

            if isinstance(data, dict):
                content = data.get("raw_text", str(data))
            else:
                content = str(data)

            location_data.append(f"{location_name}: {content}")

        location_text = "\n".join(location_data)

        prompt = f"""Create a concise narrative summary of the user's location-related experiences and activities. Focus on places they've been, lived, worked, or had significant experiences.

Location Data:
{location_text}

Create a narrative summary that:
1. Groups related locations geographically when possible
2. Highlights significant places and experiences
3. Shows patterns in the user's movements or preferences
4. Keeps the summary concise but informative

Location Summary:"""

        try:
            response = await llm.ainvoke(prompt)
            return response.content.strip()
        except Exception as e:
            logger.error(f"LLM location summary failed: {e}")
            return self._generate_structured_location_summary(location_items)

    async def get_location_events_for_search(
        self, location_query: str, namespace: str = "default"
    ) -> list[dict]:
        """
        Get location events relevant to a search query.

        Args:
            location_query: Search query for locations
            namespace: Namespace to search for location data (default: "default")

        Returns:
            List of relevant location events
        """
        try:
            # namespace parameter is passed to function

            # Search for location events
            all_items = await self.memory_store.asearch(namespace, "location.")
            location_items = [
                (path, data) for path, data in all_items if path.startswith("location.")
            ]

            # Filter by relevance to query
            relevant_events = []
            query_lower = location_query.lower()

            for path, data in location_items:
                location_name = path.split(".", 1)[1].replace("_", " ")

                if isinstance(data, dict):
                    content = data.get("raw_text", str(data))
                else:
                    content = str(data)

                # Check if query matches location name or content
                if (
                    query_lower in location_name.lower()
                    or query_lower in content.lower()
                ):
                    relevant_events.append(
                        {
                            "location": location_name.title(),
                            "content": content,
                            "path": path,
                        }
                    )

            return relevant_events

        except Exception as e:
            logger.error(f"Failed to get location events for search: {e}")
            return []

__init__

__init__(memory_store)

Initialize location memento with memory store.

Source code in src/memoir/memento/location.py
def __init__(self, memory_store):
    """Initialize location memento with memory store."""
    # Backend store providing asearch() and store_memory_async().
    self.memory_store = memory_store

apply_location_events async

apply_location_events(location_events: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply location events to the memory store.

For same-location events, retrieves existing content and merges with new event.

Parameters:

Name Type Description Default
location_events list[dict[str, str]]

List of location events with location and description

required
metadata dict | None

Optional metadata to include with events

None
namespace str

Namespace to store location events in (default: "default")

'default'
Source code in src/memoir/memento/location.py
async def apply_location_events(
    self,
    location_events: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply location events to the memory store.

    For same-location events, retrieves existing content and merges with new event.

    Args:
        location_events: List of location events with location and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store location events in (default: "default")
    """
    logger.debug(
        f"LocationManager.apply_location_events called with {len(location_events) if location_events else 0} events"
    )
    if not location_events:
        logger.debug("No location events provided to apply_location_events")
        return

    for event in location_events:
        location_name = event.get("location", "")
        description = event.get("description", "")

        # Both fields are required to form a meaningful event.
        if not location_name or not description:
            logger.warning(f"Invalid location event: {event}")
            continue

        # Normalize location name for consistent storage
        normalized_location = self._normalize_location_name(location_name)

        if not normalized_location:
            logger.debug(f"Invalid location name: {location_name}")
            continue

        # Create the location path
        location_path = f"location.{normalized_location}"

        # Storage failures are logged per event so one bad event does not
        # abort the remainder of the batch.
        try:
            await self._store_or_merge_location_event(
                location_path, description, metadata, namespace
            )
            logger.debug(f"Applied location event: {location_path} - {description}")
        except Exception as e:
            logger.error(f"Failed to apply location event {location_path}: {e}")

get_location_summary async

get_location_summary(llm: Any | None = None, namespace: str = 'default') -> str

Generate a summary of all location events.

Parameters:

Name Type Description Default
llm Any | None

Optional LLM for generating narrative summaries

None
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
str

String summary of location events

Source code in src/memoir/memento/location.py
async def get_location_summary(
    self, llm: Any | None = None, namespace: str = "default"
) -> str:
    """
    Generate a summary of all location events.

    Falls back to a structured (non-narrative) summary when no LLM is given.

    Args:
        llm: Optional LLM for generating narrative summaries
        namespace: Namespace to search for location data (default: "default")

    Returns:
        String summary of location events
    """
    try:
        # namespace parameter is passed to function

        # Search for all location events
        logger.debug(
            f"Searching for location events with query: namespace='{namespace}', prefix='location.'"
        )
        all_items = await self.memory_store.asearch(namespace, "location.")
        logger.debug(f"Search returned {len(all_items)} items")

        # Debug: log what we found
        if all_items:
            logger.info(f"Found {len(all_items)} items with location. prefix")
            for item in all_items[:3]:  # Log first few items
                logger.info(f"Location item: {item}")
        else:
            logger.debug("No items found with location. prefix")

            # Debug: search for ANY items with location data
            # (diagnoses events stored under unexpected, non-location.* paths)
            logger.debug("Searching for ANY items with location data...")
            all_items_debug = await self.memory_store.asearch(namespace, "")
            location_items_debug = []
            for path, data in all_items_debug:
                if isinstance(data, dict) and (
                    data.get("memory_type") == "location_event"
                    or "location_name" in data.get("structured_data", {})
                ):
                    location_items_debug.append((path, data))
                    logger.debug(f"Found location data under path: {path}")

            if location_items_debug:
                logger.debug(
                    f"Found {len(location_items_debug)} location events but not under location.* paths!"
                )
                return self._generate_structured_location_summary(
                    location_items_debug
                )
            else:
                logger.debug("No location events found anywhere in memory store!")

        location_items = all_items  # All items should already have location. prefix

        if not location_items:
            return "No location events available."

        # If no LLM provided, generate structured summary
        if not llm:
            return self._generate_structured_location_summary(location_items)

        # Generate LLM-based narrative summary
        return await self._generate_llm_location_summary(location_items, llm)

    except Exception as e:
        logger.error(f"Failed to generate location summary: {e}")
        logger.error(f"Exception details: {type(e).__name__}: {e!s}")
        import traceback

        logger.error(f"Traceback: {traceback.format_exc()}")
        return "Error generating location summary."
get_location_events_for_search async

get_location_events_for_search(location_query: str, namespace: str = 'default') -> list[dict]

Get location events relevant to a search query.

Parameters:

Name Type Description Default
location_query str

Search query for locations

required
namespace str

Namespace to search for location data (default: "default")

'default'

Returns:

Type Description
list[dict]

List of relevant location events

Source code in src/memoir/memento/location.py
async def get_location_events_for_search(
    self, location_query: str, namespace: str = "default"
) -> list[dict]:
    """
    Get location events relevant to a search query.

    Args:
        location_query: Search query for locations
        namespace: Namespace to search for location data (default: "default")

    Returns:
        List of relevant location events, each a dict with "location",
        "content", and "path" keys. Empty on error.
    """
    try:
        # namespace parameter is passed to function

        # Search for location events
        all_items = await self.memory_store.asearch(namespace, "location.")
        # Defensive re-filter; asearch should already return only
        # location.* paths for this prefix.
        location_items = [
            (path, data) for path, data in all_items if path.startswith("location.")
        ]

        # Filter by relevance to query (case-insensitive substring match)
        relevant_events = []
        query_lower = location_query.lower()

        for path, data in location_items:
            location_name = path.split(".", 1)[1].replace("_", " ")

            if isinstance(data, dict):
                content = data.get("raw_text", str(data))
            else:
                content = str(data)

            # Check if query matches location name or content
            if (
                query_lower in location_name.lower()
                or query_lower in content.lower()
            ):
                relevant_events.append(
                    {
                        "location": location_name.title(),
                        "content": content,
                        "path": path,
                    }
                )

        return relevant_events

    except Exception as e:
        logger.error(f"Failed to get location events for search: {e}")
        return []

memoir.memento.profile module

memoir.memento.profile

Profile Memento for User Profile Generation and Management.

Handles profile serialization, summary generation, and profile updates.

ProfileMemento

Manages user profile data and generates profile summaries.

Source code in src/memoir/memento/profile.py
class ProfileMemento:
    """Manages user profile data and generates profile summaries."""

    def __init__(self, memory_store):
        """Initialize profile memento with memory store."""
        # Backend store providing asearch() and store_memory_async().
        self.memory_store = memory_store

    async def apply_profile_updates(
        self,
        profile_updates: list[dict[str, str]],
        metadata: dict | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply profile updates to the memory store.

        Args:
            profile_updates: List of profile updates with path and value
            metadata: Optional metadata to include with updates
            namespace: Namespace to store the profile updates in (default: "default")
        """
        if not profile_updates:
            return

        for update in profile_updates:
            path = update.get("path", "")
            value = update.get("value", "")

            # Both a target path and a value are required.
            if not path or not value:
                logger.warning(f"Invalid profile update: {update}")
                continue

            # Check if this is a profile path
            if not path.startswith("profile."):
                logger.warning(f"Non-profile path in profile update: {path}")
                continue

            # Store the profile update as a memory with special handling
            memory_data = {
                "raw_text": value,
                "summary": f"Profile update: {path.split('.')[-1]} = {value}",
                "structured_data": {
                    "profile_field": path,
                    "profile_value": value,
                    "update_type": "profile_update",
                },
                "memory_type": "profile_update",
            }

            # Fix: attach caller-supplied metadata (previously accepted but
            # silently ignored), matching the docstring's contract.
            if metadata:
                memory_data["metadata"] = metadata

            # Store directly using the async method (consistent with timeline manager)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied profile update: {path} = {value}")

    async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
        """
        Generate a comprehensive profile summary from stored profile data.

        Args:
            llm: Optional LLM for generating narrative summary
            namespace: Namespace to search for profile data (default: "default")

        Returns:
            Profile summary string
        """
        try:
            # Fetch every memory stored under the profile.* prefix in the
            # requested namespace.
            profile_memories = await self.memory_store.asearch(namespace, "profile.")
            logger.debug(f"Found {len(profile_memories)} profile memories")

            # Cap the working set to guard against pathological result sizes.
            profile_memories = profile_memories[:1000]

            if not profile_memories:
                return "No profile information available."

            # Build the nested category -> field -> value structure.
            profile_data = self._organize_profile_data(profile_memories)

            # Prefer a narrative summary when an LLM is available; otherwise
            # fall back to the structured text rendering.
            if llm:
                return await self._generate_llm_summary(profile_data, llm)
            return self._generate_structured_summary(profile_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate profile summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            return f"Error generating profile summary: {e}"

    def _organize_profile_data(
        self, profile_memories: list[tuple[str, Any]]
    ) -> dict[str, dict[str, str]]:
        """Organize profile memories into a structured hierarchy."""
        organized = {}

        for semantic_key, data in profile_memories:
            try:
                # Ensure semantic_key is a string
                if not isinstance(semantic_key, str):
                    logger.warning(
                        f"Non-string semantic key: {type(semantic_key)}: {semantic_key}"
                    )
                    semantic_key = str(semantic_key)

                # Handle the data format - it could be a MemoryItem dict or other format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        # This is a MemoryItem with content - extract the actual memory data
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        # This is the memory data directly
                        memory_data = data
                        structured_data = data.get("structured_data", {})
                else:
                    # If it's not a dict, try to extract meaningful data
                    logger.warning(
                        f"Unexpected data format for {semantic_key}: {type(data)}"
                    )
                    continue

                # Get the profile path and value
                profile_field = structured_data.get("profile_field")
                profile_value = structured_data.get("profile_value")
                update_type = structured_data.get("update_type")

                # Only process memories that are actual profile updates
                if update_type != "profile_update":
                    logger.debug(f"Skipping non-profile-update memory: {semantic_key}")
                    continue

                if not profile_field or not profile_value:
                    logger.warning(
                        f"Profile update memory missing field or value: {semantic_key}"
                    )
                    continue

                # Ensure profile_field is a string
                if not isinstance(profile_field, str):
                    logger.warning(
                        f"Non-string profile_field: {type(profile_field)}: {profile_field}"
                    )
                    profile_field = (
                        str(profile_field)
                        if profile_field is not None
                        else semantic_key
                    )

                if profile_field and profile_value:
                    # Convert profile_value to string if it's not already
                    if isinstance(profile_value, dict):
                        # If it's a dict, convert to JSON string
                        import json

                        profile_value_str = json.dumps(profile_value)
                    elif isinstance(profile_value, (list, tuple)):
                        # If it's a list/tuple, join as string
                        profile_value_str = ", ".join(str(x) for x in profile_value)
                    else:
                        profile_value_str = str(profile_value)

                    # Build nested dictionary structure
                    parts = profile_field.split(".")
                    current = organized

                    # Navigate to the correct nested position
                    for part in parts[:-1]:  # All except the last part
                        # Ensure part is a string
                        part = str(part) if part is not None else "unknown"
                        if part not in current:
                            current[part] = {}
                        current = current[part]

                    # Set the final value as string
                    final_key = str(parts[-1]) if parts[-1] is not None else "unknown"
                    current[final_key] = profile_value_str

            except Exception as e:
                logger.warning(f"Failed to process profile memory {semantic_key}: {e}")
                continue

        return organized

    def _generate_structured_summary(self, profile_data: dict[str, Any]) -> str:
        """Generate a structured text summary of profile data."""
        if not profile_data:
            return "No profile information available."

        summary_parts = ["=== USER PROFILE SUMMARY ===\n"]

        # Process each main category
        category_order = [
            ("personal", "Personal Information"),
            ("professional", "Professional Profile"),
            ("health", "Health & Wellness"),
            ("finance", "Financial Profile"),
            ("living", "Living Situation"),
            ("relationships", "Relationships & Social"),
            ("goals", "Goals & Aspirations"),
        ]

        for key, title in category_order:
            if key in profile_data:
                summary_parts.append(f"\n{title}:")
                summary_parts.append(
                    self._format_category_data(profile_data[key], indent=1)
                )

        # Add any other categories not in the standard order
        processed_keys = {key for key, _ in category_order}
        for key, data in profile_data.items():
            if key not in processed_keys:
                title = key.replace("_", " ").title()
                summary_parts.append(f"\n{title}:")
                summary_parts.append(self._format_category_data(data, indent=1))

        return "\n".join(summary_parts)

    def _format_category_data(self, data: dict[str, Any], indent: int = 0) -> str:
        """Format category data with proper indentation."""
        if not data:
            return "  " * indent + "No information available"

        lines = []
        prefix = "  " * indent

        for key, value in data.items():
            if isinstance(value, dict):
                # Nested category
                category_title = key.replace("_", " ").title()
                lines.append(f"{prefix}{category_title}:")
                lines.append(self._format_category_data(value, indent + 1))
            else:
                # Leaf value
                field_name = key.replace("_", " ").title()
                lines.append(f"{prefix}- {field_name}: {value}")

        return "\n".join(lines)

    async def _generate_llm_summary(self, profile_data: dict[str, Any], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert profile data to a readable format for LLM
            structured_summary = self._generate_structured_summary(profile_data)

            prompt = f"""Generate a comprehensive, narrative profile summary based on the following structured profile data. Create a natural, flowing description that captures the key aspects of this person's life, background, and characteristics.

Profile Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally
- Focus on the most important and defining characteristics
- Group related information together logically
- Keep it comprehensive but concise (2-3 paragraphs)
- Avoid simply listing facts - weave them into a story

Generate a professional profile summary:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER PROFILE SUMMARY ===\n\n{narrative_summary}\n\n--- Detailed Profile Data ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(profile_data)

__init__

__init__(memory_store)

Initialize profile memento with memory store.

Source code in src/memoir/memento/profile.py
def __init__(self, memory_store):
    """Initialize profile memento with memory store.

    Args:
        memory_store: Backing store used to persist and search profile memories.
    """
    self.memory_store = memory_store

apply_profile_updates async

apply_profile_updates(profile_updates: list[dict[str, str]], metadata: dict | None = None, namespace: str = 'default') -> None

Apply profile updates to the memory store.

Parameters:

- profile_updates (list[dict[str, str]], required): List of profile updates with path and value.
- metadata (dict | None, default None): Optional metadata to include with updates.
- namespace (str, default 'default'): Namespace to store the profile updates in.
Source code in src/memoir/memento/profile.py
async def apply_profile_updates(
    self,
    profile_updates: list[dict[str, str]],
    metadata: dict | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply profile updates to the memory store.

    Args:
        profile_updates: List of profile updates with path and value
        metadata: Optional metadata to include with updates
        namespace: Namespace to store the profile updates in (default: "default")
    """
    if not profile_updates:
        return

    for update in profile_updates:
        path = update.get("path", "")
        value = update.get("value", "")

        if not path or not value:
            logger.warning(f"Invalid profile update: {update}")
            continue

        # Check if this is a profile path
        if not path.startswith("profile."):
            logger.warning(f"Non-profile path in profile update: {path}")
            continue

        # Store the profile update as a memory with special handling
        memory_data = {
            "raw_text": value,
            "summary": f"Profile update: {path.split('.')[-1]} = {value}",
            "structured_data": {
                "profile_field": path,
                "profile_value": value,
                "update_type": "profile_update",
            },
            "memory_type": "profile_update",
        }

        # Store directly using the async method (consistent with timeline manager)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied profile update: {path} = {value}")

get_profile_summary async

get_profile_summary(llm=None, namespace: str = 'default') -> str

Generate a comprehensive profile summary from stored profile data.

Parameters:

- llm (default None): Optional LLM for generating narrative summary.
- namespace (str, default 'default'): Namespace to search for profile data.

Returns:

- str: Profile summary string.

Source code in src/memoir/memento/profile.py
async def get_profile_summary(self, llm=None, namespace: str = "default") -> str:
    """
    Generate a comprehensive profile summary from stored profile data.

    Args:
        llm: Optional LLM for generating narrative summary
        namespace: Namespace to search for profile data (default: "default")

    Returns:
        Profile summary string, or an error message if summarization fails
    """
    try:
        # All profile memories live under keys prefixed with "profile.".
        memories = await self.memory_store.asearch(namespace, "profile.")

        logger.debug(f"Found {len(memories)} profile memories")

        # Cap the working set so summarization stays bounded.
        if len(memories) > 1000:
            memories = memories[:1000]

        if not memories:
            return "No profile information available."

        # Group flat memories into a nested category structure.
        organized = self._organize_profile_data(memories)

        if llm:
            return await self._generate_llm_summary(organized, llm)
        return self._generate_structured_summary(organized)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate profile summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating profile summary: {e}"

memoir.memento.timeline module

memoir.memento.timeline

Timeline Memento for User Event History and Temporal Memory Management.

Handles chronological event storage, date-based organization, and timeline summaries. Events are stored under timeline.YYYYMMDD keys with automatic merging of same-day events.

TimelineMemento

Manages user timeline data and generates chronological event summaries.

Source code in src/memoir/memento/timeline.py
class TimelineMemento:
    """Manages user timeline data and generates chronological event summaries."""

    def __init__(self, memory_store):
        """Initialize timeline memento with memory store."""
        self.memory_store = memory_store

    async def apply_timeline_events(
        self,
        timeline_events: list[dict[str, str]],
        metadata: dict | None = None,
        original_content: str | None = None,
        namespace: str = "default",
    ) -> None:
        """
        Apply timeline events to the memory store.

        For same-day events, retrieves existing content and merges with new event.

        Args:
            timeline_events: List of timeline events with date and description
            metadata: Optional metadata to include with events
            namespace: Namespace to store timeline events in (default: "default")
        """
        if not timeline_events:
            return

        for event in timeline_events:
            date_str = event.get("date", "")  # Format: YYYYMMDD
            description = event.get("description", "")

            if not date_str or not description:
                logger.warning(f"Invalid timeline event: {event}")
                continue

            # Validate date format
            if not self._validate_date_format(date_str):
                logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
                continue

            # Create the timeline path
            path = f"timeline.{date_str}"

            # Check if there's already an event for this date
            existing_events = await self.memory_store.asearch(namespace, path)

            if existing_events:
                # Merge with existing event(s) for the same day
                existing_content = self._extract_existing_content(existing_events)
                merged_content = self._merge_events(existing_content, description)
            else:
                merged_content = description

            # Store the timeline event as a memory
            memory_data = {
                "raw_text": merged_content,
                "original_content": original_content
                or merged_content,  # Store original input if available
                "summary": f"Timeline event on {self._format_date_display(date_str)}",
                "structured_data": {
                    "timeline_date": date_str,
                    "timeline_content": merged_content,
                    "original_content": original_content or merged_content,
                    "update_type": "timeline_event",
                },
                "memory_type": "timeline_event",
            }

            logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

            # Store directly using the memory store with correct signature (async)
            await self.memory_store.store_memory_async(namespace, memory_data, path)
            logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

    async def get_timeline_summary(
        self,
        start_date: str | None = None,
        end_date: str | None = None,
        llm=None,
        namespace: str = "default",
    ) -> str:
        """
        Generate a comprehensive timeline summary from stored timeline data.

        Args:
            start_date: Optional start date (YYYYMMDD format)
            end_date: Optional end date (YYYYMMDD format)
            llm: Optional LLM for generating narrative summary

        Returns:
            Timeline summary string
        """
        try:
            # Search for all timeline memories
            timeline_memories = await self.memory_store.asearch(namespace, "timeline.")

            # Debug: log what we found
            logger.debug(f"Found {len(timeline_memories)} timeline memories")

            # Filter by date range if specified
            if start_date or end_date:
                timeline_memories = self._filter_by_date_range(
                    timeline_memories, start_date, end_date
                )

            # Limit results if too many
            if len(timeline_memories) > 1000:
                timeline_memories = timeline_memories[:1000]

            if not timeline_memories:
                return "No timeline events available."

            # Organize timeline data chronologically
            timeline_data = self._organize_timeline_data(timeline_memories)

            # Generate summary
            if llm:
                return await self._generate_llm_summary(timeline_data, llm)
            else:
                return self._generate_structured_summary(timeline_data)

        except Exception as e:
            import traceback

            logger.error(f"Failed to generate timeline summary: {e}")
            logger.error(f"Full traceback: {traceback.format_exc()}")
            return f"Error generating timeline summary: {e}"

    def _validate_date_format(self, date_str: str) -> bool:
        """Validate that date string is in YYYYMMDD format."""
        if len(date_str) != 8:
            return False
        try:
            datetime.strptime(date_str, "%Y%m%d")
            return True
        except ValueError:
            return False

    def _format_date_display(self, date_str: str) -> str:
        """Format YYYYMMDD to human-readable date."""
        try:
            dt = datetime.strptime(date_str, "%Y%m%d")
            return dt.strftime("%B %d, %Y")
        except ValueError:
            return date_str

    def _extract_existing_content(self, existing_events: list[tuple[str, Any]]) -> str:
        """Extract content from existing timeline events."""
        contents = []
        for _, data in existing_events:
            if isinstance(data, dict):
                # Check if this is a MemoryItem structure with content field
                if "content" in data and isinstance(data["content"], dict):
                    memory_data = data["content"]
                    structured_data = memory_data.get("structured_data", {})
                    timeline_content = structured_data.get("timeline_content", "")
                    if timeline_content:
                        contents.append(timeline_content)
                else:
                    # Try direct access
                    structured_data = data.get("structured_data", {})
                    timeline_content = structured_data.get("timeline_content", "")
                    if timeline_content:
                        contents.append(timeline_content)

        return " | ".join(contents) if contents else ""

    def _merge_events(self, existing_content: str, new_content: str) -> str:
        """Merge existing and new events for the same day."""
        if not existing_content:
            return new_content

        # Simple merge strategy - combine with separator
        # In production, you might want to use an LLM to create a better summary
        return f"{existing_content} | {new_content}"

    def _filter_by_date_range(
        self,
        memories: list[tuple[str, Any]],
        start_date: str | None,
        end_date: str | None,
    ) -> list[tuple[str, Any]]:
        """Filter timeline memories by date range."""
        filtered = []

        for semantic_key, data in memories:
            # Extract date from key (timeline.YYYYMMDD)
            if "." in semantic_key:
                date_str = semantic_key.split(".")[-1]
                if self._validate_date_format(date_str):
                    # Check if within range
                    if start_date and date_str < start_date:
                        continue
                    if end_date and date_str > end_date:
                        continue
                    filtered.append((semantic_key, data))

        return filtered

    def _organize_timeline_data(
        self, timeline_memories: list[tuple[str, Any]]
    ) -> dict[str, str]:
        """Organize timeline memories into a chronological structure."""
        organized = {}

        for semantic_key, data in timeline_memories:
            try:
                # Extract date from key
                if "." not in semantic_key:
                    continue

                date_str = semantic_key.split(".")[-1]
                if not self._validate_date_format(date_str):
                    continue

                # Handle the data format
                if isinstance(data, dict):
                    # Check if this is a MemoryItem structure with content field
                    if "content" in data and isinstance(data["content"], dict):
                        memory_data = data["content"]
                        structured_data = memory_data.get("structured_data", {})
                    else:
                        memory_data = data
                        structured_data = data.get("structured_data", {})

                    # Get the timeline content
                    timeline_content = structured_data.get("timeline_content")
                    update_type = structured_data.get("update_type")

                    # Only process memories that are actual timeline events
                    if update_type != "timeline_event":
                        logger.debug(
                            f"Skipping non-timeline-event memory: {semantic_key}"
                        )
                        continue

                    if timeline_content:
                        organized[date_str] = timeline_content

            except Exception as e:
                logger.warning(f"Failed to process timeline memory {semantic_key}: {e}")
                continue

        # Sort by date
        sorted_dates = sorted(organized.keys())
        return {date: organized[date] for date in sorted_dates}

    def _generate_structured_summary(self, timeline_data: dict[str, str]) -> str:
        """Generate a structured text summary of timeline data."""
        if not timeline_data:
            return "No timeline events available."

        summary_parts = ["=== USER TIMELINE ===\n"]

        # Group by year and month for better organization
        events_by_year = {}
        for date_str, content in timeline_data.items():
            year = date_str[:4]
            month = date_str[4:6]

            if year not in events_by_year:
                events_by_year[year] = {}
            if month not in events_by_year[year]:
                events_by_year[year][month] = []

            events_by_year[year][month].append((date_str, content))

        # Generate summary by year and month
        for year in sorted(events_by_year.keys(), reverse=True):
            summary_parts.append(f"\n{year}:")

            for month in sorted(events_by_year[year].keys(), reverse=True):
                month_name = datetime.strptime(f"{year}{month}01", "%Y%m%d").strftime(
                    "%B"
                )
                summary_parts.append(f"\n  {month_name}:")

                for date_str, content in sorted(
                    events_by_year[year][month], reverse=True
                ):
                    day = int(date_str[6:8])
                    summary_parts.append(f"    {day:2d}: {content}")

        return "\n".join(summary_parts)

    async def _generate_llm_summary(self, timeline_data: dict[str, str], llm) -> str:
        """Generate a narrative summary using LLM."""
        try:
            # Convert timeline data to a readable format for LLM
            structured_summary = self._generate_structured_summary(timeline_data)

            prompt = f"""Generate a comprehensive, narrative timeline summary based on the following chronological events. Create a natural, flowing description that captures the key events and their significance in the person's life.

Timeline Data:
{structured_summary}

Instructions:
- Write in third person
- Create a cohesive narrative that flows naturally through time
- Highlight significant events and patterns
- Group related events logically
- Keep it comprehensive but concise
- Focus on the progression and development over time

Generate a timeline narrative:"""

            response = await llm.ainvoke(prompt)

            if hasattr(response, "content"):
                narrative_summary = response.content
            else:
                narrative_summary = str(response)

            # Combine structured and narrative summaries
            return f"=== USER TIMELINE ===\n\n{narrative_summary}\n\n--- Detailed Timeline ---\n{structured_summary}"

        except Exception as e:
            logger.error(f"Failed to generate LLM summary: {e}")
            # Fallback to structured summary
            return self._generate_structured_summary(timeline_data)

__init__

__init__(memory_store)

Initialize timeline memento with memory store.

Source code in src/memoir/memento/timeline.py
def __init__(self, memory_store):
    """Initialize timeline memento with memory store.

    Args:
        memory_store: Backing store used to persist and search timeline memories.
    """
    self.memory_store = memory_store

apply_timeline_events async

apply_timeline_events(timeline_events: list[dict[str, str]], metadata: dict | None = None, original_content: str | None = None, namespace: str = 'default') -> None

Apply timeline events to the memory store.

For same-day events, retrieves existing content and merges with new event.

Parameters:

- timeline_events (list[dict[str, str]], required): List of timeline events with date and description.
- metadata (dict | None, default None): Optional metadata to include with events.
- original_content (str | None, default None): Original input text preserved alongside the event.
- namespace (str, default 'default'): Namespace to store timeline events in.
Source code in src/memoir/memento/timeline.py
async def apply_timeline_events(
    self,
    timeline_events: list[dict[str, str]],
    metadata: dict | None = None,
    original_content: str | None = None,
    namespace: str = "default",
) -> None:
    """
    Apply timeline events to the memory store.

    For same-day events, retrieves existing content and merges with new event.

    Args:
        timeline_events: List of timeline events with date and description
        metadata: Optional metadata to include with events
        namespace: Namespace to store timeline events in (default: "default")
    """
    if not timeline_events:
        return

    for event in timeline_events:
        date_str = event.get("date", "")  # Format: YYYYMMDD
        description = event.get("description", "")

        if not date_str or not description:
            logger.warning(f"Invalid timeline event: {event}")
            continue

        # Validate date format
        if not self._validate_date_format(date_str):
            logger.warning(f"Invalid date format (expected YYYYMMDD): {date_str}")
            continue

        # Create the timeline path
        path = f"timeline.{date_str}"

        # Check if there's already an event for this date
        existing_events = await self.memory_store.asearch(namespace, path)

        if existing_events:
            # Merge with existing event(s) for the same day
            existing_content = self._extract_existing_content(existing_events)
            merged_content = self._merge_events(existing_content, description)
        else:
            merged_content = description

        # Store the timeline event as a memory
        memory_data = {
            "raw_text": merged_content,
            "original_content": original_content
            or merged_content,  # Store original input if available
            "summary": f"Timeline event on {self._format_date_display(date_str)}",
            "structured_data": {
                "timeline_date": date_str,
                "timeline_content": merged_content,
                "original_content": original_content or merged_content,
                "update_type": "timeline_event",
            },
            "memory_type": "timeline_event",
        }

        logger.info(f"DEBUG: Storing timeline memory_data: {memory_data}")

        # Store directly using the memory store with correct signature (async)
        await self.memory_store.store_memory_async(namespace, memory_data, path)
        logger.info(f"Applied timeline event: {path} = {merged_content[:100]}...")

get_timeline_summary async

get_timeline_summary(start_date: str | None = None, end_date: str | None = None, llm=None, namespace: str = 'default') -> str

Generate a comprehensive timeline summary from stored timeline data.

Parameters:

- start_date (str | None, default None): Optional start date (YYYYMMDD format).
- end_date (str | None, default None): Optional end date (YYYYMMDD format).
- llm (default None): Optional LLM for generating narrative summary.
- namespace (str, default 'default'): Namespace to search for timeline data.

Returns:

- str: Timeline summary string.

Source code in src/memoir/memento/timeline.py
async def get_timeline_summary(
    self,
    start_date: str | None = None,
    end_date: str | None = None,
    llm=None,
    namespace: str = "default",
) -> str:
    """
    Generate a comprehensive timeline summary from stored timeline data.

    Args:
        start_date: Optional start date (YYYYMMDD format)
        end_date: Optional end date (YYYYMMDD format)
        llm: Optional LLM for generating narrative summary
        namespace: Namespace to search for timeline data (default: "default")

    Returns:
        Timeline summary string, or an error message if summarization fails
    """
    try:
        # All timeline memories live under keys prefixed with "timeline.".
        memories = await self.memory_store.asearch(namespace, "timeline.")

        logger.debug(f"Found {len(memories)} timeline memories")

        # Restrict to the requested date window, when one was given.
        if start_date or end_date:
            memories = self._filter_by_date_range(memories, start_date, end_date)

        # Cap the working set so summarization stays bounded.
        if len(memories) > 1000:
            memories = memories[:1000]

        if not memories:
            return "No timeline events available."

        # Collapse memories into a chronologically-sorted date -> content map.
        by_date = self._organize_timeline_data(memories)

        if llm:
            return await self._generate_llm_summary(by_date, llm)
        return self._generate_structured_summary(by_date)

    except Exception as e:
        import traceback

        logger.error(f"Failed to generate timeline summary: {e}")
        logger.error(f"Full traceback: {traceback.format_exc()}")
        return f"Error generating timeline summary: {e}"