#include "captive/macros.h"
#include <sys/mman.h>
#include "reactos/ddk/obfuncs.h"
+#include <stdlib.h>
static GHashTable *CaptiveSharedCacheMapObject_hash;
}
if (size_old!=size_new) {
+ /* ntfs.sys of NT-5.1sp1 may extend StreamFileObject while dirty pins exist.
+ * How to extend SharedCacheMap size without changing the memory location?
+ * I hope ntfs.sys does not expect long-term absolute position of its
+ * StreamFileObject:
+ */
+ if (!(captive_shared_cache_map_object->FileObject->Flags&FO_STREAM_FILE)) {
+	/* These two assertions should already have been caught by the pin/map signal handlers. */
+ g_assert(!captive_shared_cache_map_object->map);
+ g_assert(!g_hash_table_size(captive_shared_cache_map_object->pin_hash));
+ }
+ }
+
+ if (!size_new || size_new > captive_shared_cache_map_object->alloc_length) {
+size_t alloc_new;
+guint64 alloc64_new;
gpointer buffer_new;
- /* These two assertions should be already catched by pin/map signal handlers. */
- g_assert(!captive_shared_cache_map_object->map);
- g_assert(!g_hash_table_size(captive_shared_cache_map_object->pin_hash));
+ alloc64_new=CAPTIVE_ROUND_UP64((!size64_new ? 0 : MAX(size64_new*2,0x10000)),PAGE_SIZE);
+ alloc_new=alloc64_new;
+ if (alloc_new!=alloc64_new)
+ goto size_new_big;
- if (AllocationSize) {
+ if (!alloc_new)
+ buffer_new=NULL;
+ else {
gpointer base;
int errint;
base=mmap(
NULL, /* start */
- PAGE_SIZE+size_new+PAGE_SIZE, /* length; leading and trailing boundary check pages */
+ PAGE_SIZE+alloc_new+PAGE_SIZE, /* length; leading and trailing boundary check pages */
PROT_READ|PROT_WRITE, /* prot; read/write must be possible although write is not guaranteed to be flushed yet */
MAP_PRIVATE|MAP_ANONYMOUS /* flags */
|MAP_NORESERVE, /* At least ext2fsd maps the whole disk. */
base+=PAGE_SIZE;
errint=munmap(base-PAGE_SIZE,PAGE_SIZE); /* unmap leading boundary check page */
g_assert(errint==0);
- errint=munmap(base+size_new,PAGE_SIZE); /* unmap trailing boundary check page */
+ errint=munmap(base+alloc_new,PAGE_SIZE); /* unmap trailing boundary check page */
g_assert(errint==0);
buffer_new=base;
}
- else
- buffer_new=NULL;
memcpy(buffer_new,captive_shared_cache_map_object->buffer,
MIN(AllocationSize,captive_shared_cache_map_object->AllocationSize));
- if (captive_shared_cache_map_object->AllocationSize) {
+ if (captive_shared_cache_map_object->alloc_length) {
int errint;
- errint=munmap(captive_shared_cache_map_object->buffer,size_old);
+ errint=munmap(captive_shared_cache_map_object->buffer,captive_shared_cache_map_object->alloc_length);
g_assert(errint==0);
}
captive_shared_cache_map_object->buffer=buffer_new;
#if 0 /* It appears it is valid to squeeze out 'dirty' blocks. FIXME: Flush them? */
+	/* FIXME: This code may no longer be valid after the introduction of 'alloc_new'! */
if (size_old>size_new) {
guint64 now;
}
}
#endif
+
captive_shared_cache_map_object->pages=g_realloc(captive_shared_cache_map_object->pages,
- size_new/PAGE_SIZE*sizeof(*captive_shared_cache_map_object->pages));
- if (size_new>size_old) /* prevent 'size_new-size_old' as it is unsigned! */
- memset(captive_shared_cache_map_object->pages+(size_old/PAGE_SIZE),0,
- (size_new-size_old)/PAGE_SIZE*sizeof(*captive_shared_cache_map_object->pages));
+ alloc_new/PAGE_SIZE*sizeof(*captive_shared_cache_map_object->pages));
+
+ captive_shared_cache_map_object->alloc_length=alloc_new;
}
+ if (size_new>size_old) /* prevent 'size_new-size_old' as it is unsigned! */
+ memset(captive_shared_cache_map_object->pages+(size_old/PAGE_SIZE),0,
+ (size_new-size_old)/PAGE_SIZE*sizeof(*captive_shared_cache_map_object->pages));
+
captive_shared_cache_map_object->AllocationSize=AllocationSize;
captive_shared_cache_map_object->FileSize=FileSize;
captive_shared_cache_map_object->ValidDataLength=ValidDataLength;
FileSize=FileSizes->FileSize.QuadPart;
ValidDataLength=FileSizes->ValidDataLength.QuadPart;
- if (ValidDataLength==G_MAXINT64)
- ValidDataLength=FileSize;
+ /* Do not: if (ValidDataLength==G_MAXINT64)
+ * ValidDataLength=FileSize;
+	 * In some cases (during an NTFS mount) 'ValidDataLength' may be entirely invalid:
+ * CcSetFileSizes(AllocationSize=0x1000000,FileSize=0xf80208,ValidDataLength=0x23b801a0)
+ */
+ ValidDataLength=FileSize;
g_assert(AllocationSize>=0);
g_assert(FileSize>=0);
g_object_unref(captive_shared_cache_map_object);
}
+/* Query the current W32 reference count of 'captive_shared_cache_map_object'.
+ * Returns the value of the 'w32_ref_count' field, or 0 if the argument fails
+ * the CAPTIVE_SHARED_CACHE_MAP_IS_OBJECT() type check.
+ */
+gint captive_shared_cache_map_query_w32_ref(CaptiveSharedCacheMapObject *captive_shared_cache_map_object)
+{
+	g_return_val_if_fail(CAPTIVE_SHARED_CACHE_MAP_IS_OBJECT(captive_shared_cache_map_object),0);
+
+	return captive_shared_cache_map_object->w32_ref_count;
+}
+
void captive_shared_cache_map_data_validate_read(CaptiveSharedCacheMapObject *captive_shared_cache_map_object,
FILE_OBJECT *FileObject,guint64 start,guint64 end)
{
page->lsn_newest=0;
}
-typedef struct _captive_shared_cache_map_page_write_lsn_foreach_param captive_shared_cache_map_page_write_lsn_foreach_param;
-struct _captive_shared_cache_map_page_write_lsn_foreach_param {
+typedef struct _captive_shared_cache_map_flush_lsn_sort captive_shared_cache_map_flush_lsn_sort;
+struct _captive_shared_cache_map_flush_lsn_sort {
+ gint64 lsn;
+ CaptiveSharedCacheMapObject *captive_shared_cache_map_object;
+ guint64 offset;
+ };
+
+typedef struct _captive_shared_cache_map_flush_lsn_pages_foreach_param
+ captive_shared_cache_map_flush_lsn_pages_foreach_param;
+struct _captive_shared_cache_map_flush_lsn_pages_foreach_param {
gint64 lsn_target;
- gint64 lsn_best;
- CaptiveSharedCacheMapObject *captive_shared_cache_map_object_best;
- guint64 offset_best;
+ guint lsn_pages_count;
+ captive_shared_cache_map_flush_lsn_sort *lsn_pages_pointer; /* Not filled in if NULL */
};
-static void captive_shared_cache_map_page_write_lsn_foreach(
+static void captive_shared_cache_map_flush_lsn_pages_foreach(
CaptiveSharedCacheMapObject *captive_shared_cache_map_object, /* key */
CaptiveSharedCacheMapObject *captive_shared_cache_map_object_value, /* value */
- captive_shared_cache_map_page_write_lsn_foreach_param *param) /* user_data */
+ captive_shared_cache_map_flush_lsn_pages_foreach_param *param) /* user_data */
{
guint64 now;
CaptiveSharedCacheMapObject_page *page;
+#if 0 /* acceleration */
g_return_if_fail(CAPTIVE_SHARED_CACHE_MAP_IS_OBJECT(captive_shared_cache_map_object));
g_return_if_fail(captive_shared_cache_map_object==captive_shared_cache_map_object_value);
g_return_if_fail(param!=NULL);
+#endif
for (now=0;now<captive_shared_cache_map_object->AllocationSize;now+=PAGE_SIZE) {
page=captive_shared_cache_map_object->pages+now/PAGE_SIZE;
continue;
if (!page->lsn_newest)
continue;
- if (page->lsn_newest>=param->lsn_target)
+ if (page->lsn_newest>param->lsn_target)
continue;
- if (param->lsn_best && page->lsn_newest>param->lsn_best)
+ param->lsn_pages_count++;
+ if (!param->lsn_pages_pointer)
continue;
- param->lsn_best=page->lsn_newest;
- param->captive_shared_cache_map_object_best=captive_shared_cache_map_object;
- param->offset_best=now;
+ param->lsn_pages_pointer->lsn=page->lsn_newest;
+ param->lsn_pages_pointer->captive_shared_cache_map_object=captive_shared_cache_map_object;
+ param->lsn_pages_pointer->offset=now;
+ param->lsn_pages_pointer++;
}
}
-static void captive_shared_cache_map_page_write_lsn(CaptiveSharedCacheMapObject *captive_shared_cache_map_object,
- guint64 offset)
+static int captive_shared_cache_map_flush_lsn_pages_compar
+ (const captive_shared_cache_map_flush_lsn_sort *a,const captive_shared_cache_map_flush_lsn_sort *b)
{
-CaptiveSharedCacheMapObject_page *page;
-
- g_return_if_fail(CAPTIVE_SHARED_CACHE_MAP_IS_OBJECT(captive_shared_cache_map_object));
- g_return_if_fail(captive_shared_cache_map_object->FileObject!=NULL);
- g_return_if_fail(offset<CAPTIVE_ROUND_UP64(captive_shared_cache_map_object->AllocationSize,PAGE_SIZE));
- g_return_if_fail(0==CAPTIVE_ROUND_DOWN_EXCEEDING64(offset,PAGE_SIZE));
- page=captive_shared_cache_map_object->pages+offset/PAGE_SIZE;
- g_return_if_fail(page->data_valid);
- g_return_if_fail(page->dirty);
-
- if (page->lsn_newest) {
- CaptiveSharedCacheMapObject_hash_init();
- for (;;) {
-captive_shared_cache_map_page_write_lsn_foreach_param param;
-
- param.lsn_target=page->lsn_newest;
- param.lsn_best=0;
- g_hash_table_foreach(
- CaptiveSharedCacheMapObject_hash, /* hash_table */
- (GHFunc)captive_shared_cache_map_page_write_lsn_foreach, /* func */
- ¶m); /* user_data */
- if (!param.lsn_best)
- break;
- captive_shared_cache_map_page_write(param.captive_shared_cache_map_object_best,param.offset_best);
- }
- }
+#if 0 /* acceleration */
+ g_return_val_if_fail(a!=NULL,0);
+ g_return_val_if_fail(b!=NULL,0);
+#endif
- captive_shared_cache_map_page_write(captive_shared_cache_map_object,offset);
+ return (a->lsn>b->lsn)-(b->lsn>a->lsn);
}
guint64 captive_shared_cache_map_flush(CaptiveSharedCacheMapObject *captive_shared_cache_map_object,
{
guint64 flushed;
guint64 now;
+gint64 lsn_target;
+captive_shared_cache_map_flush_lsn_pages_foreach_param lsn_pages_foreach_param;
+captive_shared_cache_map_flush_lsn_sort *lsn_pages_pointer;
+const captive_shared_cache_map_flush_lsn_sort *lsn_page;
+guint lsn_pages_count;
g_return_val_if_fail(CAPTIVE_SHARED_CACHE_MAP_IS_OBJECT(captive_shared_cache_map_object),0);
g_return_val_if_fail(start<=end,0);
start=CAPTIVE_ROUND_DOWN64(start,PAGE_SIZE);
end=CAPTIVE_ROUND_UP64(end,PAGE_SIZE);
+ lsn_target=0;
+ for (now=start;now<end;now+=PAGE_SIZE) {
+CaptiveSharedCacheMapObject_page *page;
+
+ page=captive_shared_cache_map_object->pages+now/PAGE_SIZE;
+ if (!page->data_valid)
+ continue;
+ if (!page->dirty)
+ continue;
+ if (!page->lsn_newest)
+ continue;
+ if (!lsn_target || lsn_target<page->lsn_newest)
+ lsn_target=page->lsn_newest;
+ }
+
+ lsn_pages_foreach_param.lsn_target=lsn_target;
+ lsn_pages_foreach_param.lsn_pages_count=0;
+ lsn_pages_foreach_param.lsn_pages_pointer=NULL; /* Not yet filling */
+ g_hash_table_foreach(
+ CaptiveSharedCacheMapObject_hash, /* hash_table */
+ (GHFunc)captive_shared_cache_map_flush_lsn_pages_foreach, /* func */
+ &lsn_pages_foreach_param); /* user_data */
+
+ lsn_pages_count=lsn_pages_foreach_param.lsn_pages_count;
+ captive_newn(lsn_pages_pointer,lsn_pages_count);
+ g_assert(lsn_pages_foreach_param.lsn_target==lsn_target);
+ lsn_pages_foreach_param.lsn_pages_count=0;
+ lsn_pages_foreach_param.lsn_pages_pointer=lsn_pages_pointer;
+ g_hash_table_foreach(
+ CaptiveSharedCacheMapObject_hash, /* hash_table */
+ (GHFunc)captive_shared_cache_map_flush_lsn_pages_foreach, /* func */
+ &lsn_pages_foreach_param); /* user_data */
+
+ g_assert(lsn_pages_foreach_param.lsn_target==lsn_target);
+ g_assert(lsn_pages_foreach_param.lsn_pages_count==lsn_pages_count);
+ g_assert(lsn_pages_foreach_param.lsn_pages_pointer==lsn_pages_pointer+lsn_pages_count);
+
+ qsort(lsn_pages_pointer,lsn_pages_count,sizeof(*lsn_pages_pointer),
+ (int (*)(const void *,const void *))captive_shared_cache_map_flush_lsn_pages_compar);
+
flushed=0;
+
+ for (lsn_page=lsn_pages_pointer;lsn_page<lsn_pages_pointer+lsn_pages_count;lsn_page++) {
+ captive_shared_cache_map_page_write(lsn_page->captive_shared_cache_map_object,lsn_page->offset);
+ if (lsn_page->captive_shared_cache_map_object==captive_shared_cache_map_object
+ && lsn_page->offset>=start && lsn_page->offset<end)
+ flushed+=PAGE_SIZE;
+ }
+
+ g_free(lsn_pages_pointer);
+
for (now=start;now<end;now+=PAGE_SIZE) {
- if (!captive_shared_cache_map_object->pages[now/PAGE_SIZE].data_valid)
+CaptiveSharedCacheMapObject_page *page;
+
+ page=captive_shared_cache_map_object->pages+now/PAGE_SIZE;
+ if (!page->data_valid)
continue;
- if (!captive_shared_cache_map_object->pages[now/PAGE_SIZE].dirty)
+ if (!page->dirty)
continue;
- captive_shared_cache_map_page_write_lsn(captive_shared_cache_map_object,now);
+ captive_shared_cache_map_page_write(captive_shared_cache_map_object,now);
flushed+=PAGE_SIZE;
}
/* We were calling W32 code - recheck our task completion. */
for (now=start;now<end;now+=PAGE_SIZE) {
- if (!captive_shared_cache_map_object->pages[now/PAGE_SIZE].data_valid)
+CaptiveSharedCacheMapObject_page *page;
+
+ page=captive_shared_cache_map_object->pages+now/PAGE_SIZE;
+ if (!page->data_valid)
continue;
- g_assert(!captive_shared_cache_map_object->pages[now/PAGE_SIZE].dirty);
+ g_assert(!page->dirty);
}
return flushed;
g_return_if_fail(dirty_foundp!=NULL);
for (now=0;now<captive_shared_cache_map_object->AllocationSize;now+=PAGE_SIZE) {
- if (!captive_shared_cache_map_object->pages[now/PAGE_SIZE].data_valid)
+CaptiveSharedCacheMapObject_page *page;
+
+ page=captive_shared_cache_map_object->pages+now/PAGE_SIZE;
+ if (!page->data_valid)
continue;
- if (!captive_shared_cache_map_object->pages[now/PAGE_SIZE].dirty)
+ if (!page->dirty)
continue;
*dirty_foundp=TRUE; /* FIXME: stop the traversal. */
break;