2 * runlist.c - Run list handling code. Part of the Linux-NTFS project.
4 * Copyright (c) 2002-2003 Anton Altaparmakov
5 * Copyright (c) 2002 Richard Russon
7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program/include file is distributed in the hope that it will be
13 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program (in the main directory of the Linux-NTFS
19 * distribution in the file COPYING); if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
40 * ntfs_rl_mm - runlist memmove
42 static __inline__ void ntfs_rl_mm(runlist_element *base, int dst, int src,
45 if ((dst != src) && (size > 0))
46 memmove(base + dst, base + src, size * sizeof(*base));
52 * rl_mc - runlist memory copy
54 static __inline__ void ntfs_rl_mc(runlist_element *dstbase, int dst,
55 runlist_element *srcbase, int src, int size)
58 memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
64 * ntfs_rl_realloc - Reallocate memory for runlists*
65 * @rl: original runlist
66 * @old_size: number of runlist elements in the original runlist @rl
67 * @new_size: number of runlist elements we need space for
69 * As the runlists grow, more memory will be required. To prevent large
70 * numbers of small reallocations of memory, this function returns a 4kiB block
73 * N.B. If the new allocation doesn't require a different number of 4kiB
74 * blocks in memory, the function will return the original pointer.
76 * On success, return a pointer to the newly allocated, or recycled, memory.
77 * On error, return NULL with errno set to the error code.
79 static __inline__ runlist_element *ntfs_rl_realloc(runlist_element *rl,
80 int old_size, int new_size)
82 old_size = (old_size * sizeof(runlist_element) + 0xfff) & ~0xfff;
83 new_size = (new_size * sizeof(runlist_element) + 0xfff) & ~0xfff;
84 if (old_size == new_size)
86 return realloc(rl, new_size);
92 * ntfs_rl_are_mergeable - test if two runlists can be joined together
93 * @dst: original runlist
94 * @src: new runlist to test for mergeability with @dst
96 * Test if two runlists can be joined together. For this, their VCNs and LCNs
99 * Return: TRUE Success, the runlists can be merged.
100 * FALSE Failure, the runlists cannot be merged.
102 static __inline__ BOOL ntfs_rl_are_mergeable(runlist_element *dst,
103 runlist_element *src)
106 Dputs("Eeek. ntfs_rl_are_mergeable() invoked with NULL "
111 if ((dst->lcn < 0) || (src->lcn < 0)) /* Are we merging holes? */
113 if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
115 if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
124 * __ntfs_rl_merge - merge two runlists without testing if they can be merged
125 * @dst: original, destination runlist
126 * @src: new runlist to merge with @dst
128 * Merge the two runlists, writing into the destination runlist @dst. The
129 * caller must make sure the runlists can be merged or this will corrupt the
130 * destination runlist.
132 static __inline__ void __ntfs_rl_merge(runlist_element *dst,
133 runlist_element *src)
135 dst->length += src->length;
141 * ntfs_rl_merge - test if two runlists can be joined together and merge them
142 * @dst: original, destination runlist
143 * @src: new runlist to merge with @dst
145 * Test if two runlists can be joined together. For this, their VCNs and LCNs
146 * must be adjacent. If they can be merged, perform the merge, writing into
147 * the destination runlist @dst.
149 * Return: TRUE Success, the runlists have been merged.
150 * FALSE Failure, the runlists cannot be merged and have not been
153 static __inline__ BOOL ntfs_rl_merge(runlist_element *dst,
154 runlist_element *src)
156 BOOL merge = ntfs_rl_are_mergeable(dst, src);
159 __ntfs_rl_merge(dst, src);
166 * ntfs_rl_append - append a runlist after a given element
167 * @dst: original runlist to be worked on
168 * @dsize: number of elements in @dst (including end marker)
169 * @src: runlist to be inserted into @dst
170 * @ssize: number of elements in @src (excluding end marker)
171 * @loc: append the new runlist @src after this element in @dst
173 * Append the runlist @src after element @loc in @dst. Merge the right end of
174 * the new runlist, if necessary. Adjust the size of the hole before the
177 * On success, return a pointer to the new, combined, runlist. Note, both
178 * runlists @dst and @src are deallocated before returning so you cannot use
179 * the pointers for anything any more. (Strictly speaking the returned runlist
180 * may be the same as @dst but this is irrelevant.)
182 * On error, return NULL, with errno set to the error code. Both runlists are
185 static __inline__ runlist_element *ntfs_rl_append(runlist_element *dst,
186 int dsize, runlist_element *src, int ssize, int loc)
192 Dputs("Eeek. ntfs_rl_append() invoked with NULL pointer!");
197 /* First, check if the right hand end needs merging. */
198 right = ntfs_rl_are_mergeable(src + ssize - 1, dst + loc + 1);
200 /* Space required: @dst size + @src size, less one if we merged. */
201 dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
205 * We are guaranteed to succeed from here so can start modifying the
209 /* First, merge the right hand end, if necessary. */
211 __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
213 /* FIXME: What does this mean? (AIA) */
216 /* Move the tail of @dst out of the way, then copy in @src. */
217 ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
218 ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
220 /* Adjust the size of the preceding hole. */
221 dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
223 /* We may have changed the length of the file, so fix the end marker */
224 if (dst[magic + 1].lcn == LCN_ENOENT)
225 dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
/**
 * ntfs_rl_insert - insert a runlist into another
 * @dst:	original runlist to be worked on
 * @dsize:	number of elements in @dst (including end marker)
 * @src:	new runlist to be inserted
 * @ssize:	number of elements in @src (excluding end marker)
 * @loc:	insert the new runlist @src before this element in @dst
 *
 * Insert the runlist @src before element @loc in the runlist @dst.  Merge
 * the left end of the new runlist, if necessary.  Adjust the size of the
 * hole after the inserted runlist.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @dst but this is irrelevant.)
 *
 * On error, return NULL, with errno set to the error code.  Both runlists
 * are left unmodified.
 *
 * NOTE(review): this copy of the function is visibly truncated — the opening
 * brace, several local declarations (left, magic, merged_length), guard
 * conditions and the final return are missing between the lines below.
 * Compare against the upstream Linux-NTFS runlist.c before relying on it.
 */
static __inline__ runlist_element *ntfs_rl_insert(runlist_element *dst,
		int dsize, runlist_element *src, int ssize, int loc)
	BOOL disc = FALSE;	/* Discontinuity */
	BOOL hole = FALSE;	/* Following a hole */
		Dputs("Eeek. ntfs_rl_insert() invoked with NULL pointer!");

	/* disc => Discontinuity between the end of @dst and the start of @src.
	 *	   This means we might need to insert a hole.
	 * hole => @dst ends with a hole or an unmapped region which we can
	 *	   extend to match the discontinuity. */
		/* Inserting at the very front: a hole is needed iff @src does
		 * not start at vcn 0. */
		disc = (src[0].vcn > 0);
		/* Otherwise, see whether @src merges with its left neighbour
		 * and whether a gap remains after the (possible) merge. */
		left = ntfs_rl_are_mergeable(dst + loc - 1, src);
		merged_length = dst[loc - 1].length;
			merged_length += src->length;
		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
			/* An existing hole can be stretched over the gap. */
			hole = (dst[loc - 1].lcn == LCN_HOLE);

	/* Space required: @dst size + @src size, less one if we merged, plus
	 * one if there was a discontinuity, less one for a trailing hole. */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */
		__ntfs_rl_merge(dst + loc - 1, src);

	/* FIXME: What does this mean? (AIA) */
	magic = loc + ssize - left + disc - hole;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, magic, loc, dsize - loc);
	ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);

	/* Adjust the VCN of the last run ... */
	if (dst[magic].lcn <= LCN_HOLE)
		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
	/* ... and the length. */
	if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
		dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;

	/* Writing beyond the end of the file and there's a discontinuity. */
		/* Extend the preceding hole over the gap... */
		dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
		/* ...or materialise a fresh unmapped element to cover it. */
		dst[loc].vcn = dst[loc - 1].vcn +
		dst[loc].length = dst[loc + 1].vcn -
			/* Inserting at the front: the gap starts at vcn 0. */
			dst[loc].length = dst[loc + 1].vcn;
		dst[loc].lcn = LCN_RL_NOT_MAPPED;

	/* We may have changed the length of the file, so fix the end marker. */
	if (dst[magic].lcn == LCN_ENOENT)
		dst[magic].vcn = dst[magic - 1].vcn +
				dst[magic - 1].length;
342 * ntfs_rl_replace - overwrite a runlist element with another runlist
343 * @dst: original runlist to be worked on
344 * @dsize: number of elements in @dst (including end marker)
345 * @src: new runlist to be inserted
346 * @ssize: number of elements in @src (excluding end marker)
347 * @loc: index in runlist @dst to overwrite with @src
349 * Replace the runlist element @dst at @loc with @src. Merge the left and
350 * right ends of the inserted runlist, if necessary.
352 * On success, return a pointer to the new, combined, runlist. Note, both
353 * runlists @dst and @src are deallocated before returning so you cannot use
354 * the pointers for anything any more. (Strictly speaking the returned runlist
355 * may be the same as @dst but this is irrelevant.)
357 * On error, return NULL, with errno set to the error code. Both runlists are
360 static __inline__ runlist_element *ntfs_rl_replace(runlist_element *dst,
361 int dsize, runlist_element *src, int ssize, int loc)
368 Dputs("Eeek. ntfs_rl_replace() invoked with NULL pointer!");
373 /* First, merge the left and right ends, if necessary. */
374 right = ntfs_rl_are_mergeable(src + ssize - 1, dst + loc + 1);
376 left = ntfs_rl_are_mergeable(dst + loc - 1, src);
378 /* Allocate some space. We'll need less if the left, right, or both
379 * ends were merged. */
380 dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
384 * We are guaranteed to succeed from here so can start modifying the
388 __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
390 __ntfs_rl_merge(dst + loc - 1, src);
392 /* FIXME: What does this mean? (AIA) */
393 magic = loc + ssize - left;
395 /* Move the tail of @dst out of the way, then copy in @src. */
396 ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
397 ntfs_rl_mc(dst, loc, src, left, ssize - left);
399 /* We may have changed the length of the file, so fix the end marker */
400 if (dst[magic].lcn == LCN_ENOENT)
401 dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
408 * ntfs_rl_split - insert a runlist into the centre of a hole
409 * @dst: original runlist to be worked on
410 * @dsize: number of elements in @dst (including end marker)
411 * @src: new runlist to be inserted
412 * @ssize: number of elements in @src (excluding end marker)
413 * @loc: index in runlist @dst at which to split and insert @src
415 * Split the runlist @dst at @loc into two and insert @new in between the two
416 * fragments. No merging of runlists is necessary. Adjust the size of the
419 * On success, return a pointer to the new, combined, runlist. Note, both
420 * runlists @dst and @src are deallocated before returning so you cannot use
421 * the pointers for anything any more. (Strictly speaking the returned runlist
422 * may be the same as @dst but this is irrelevant.)
424 * On error, return NULL, with errno set to the error code. Both runlists are
427 static __inline__ runlist_element *ntfs_rl_split(runlist_element *dst,
428 int dsize, runlist_element *src, int ssize, int loc)
431 Dputs("Eeek. ntfs_rl_split() invoked with NULL pointer!");
436 /* Space required: @dst size + @src size + one new hole. */
437 dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
441 * We are guaranteed to succeed from here so can start modifying the
445 /* Move the tail of @dst out of the way, then copy in @src. */
446 ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
447 ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
449 /* Adjust the size of the holes either size of @src. */
450 dst[loc].length = dst[loc+1].vcn - dst[loc].vcn;
451 dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length;
452 dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
/**
 * ntfs_runlists_merge - merge two runlists into one
 * @drl:	original runlist to be worked on
 * @srl:	new runlist to be merged into @drl
 *
 * First we sanity check the two runlists @srl and @drl to make sure that
 * they are sensible and can be merged.  The runlist @srl must be either
 * after the runlist @drl or completely within a hole (or unmapped region)
 * in @drl.
 *
 * Merging of runlists is necessary in two cases:
 *   1. When attribute lists are used and a further extent is being mapped.
 *   2. When new clusters are allocated to fill a hole or extend a file.
 *
 * There are four possible ways @srl can be merged.  It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a
 * variant of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist.  Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more.  (Strictly speaking the returned
 * runlist may be the same as @drl but this is irrelevant.)
 *
 * On error, return NULL, with errno set to the error code.  Both runlists
 * are left unmodified.  The following error codes are defined:
 *	ENOMEM	Not enough memory to allocate runlist array.
 *	EINVAL	Invalid parameters were passed in.
 *	ERANGE	The runlists overlap and cannot be merged.
 *
 * NOTE(review): this copy of the function is visibly truncated — many
 * statements (braces, guards, variable declarations such as marker/
 * marker_vcn/start/finish, assignments and returns) are missing between the
 * lines below.  Compare against the upstream Linux-NTFS runlist.c before
 * relying on it.
 */
runlist_element *ntfs_runlists_merge(runlist_element *drl,
		runlist_element *srl)
	int di, si;		/* Current index into @[ds]rl. */
	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;		/* Index into @drl at which to insert @srl. */
	int dend, send;		/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with
				   lcn >= LCN_HOLE. */

	ntfs_debug_runlist_dump(drl);
	ntfs_debug_runlist_dump(srl);

	/* Check for silly calling... */

	/* Check for the case where the first mapping is being done now. */
	/* Complete the source runlist if necessary. */
	/* Scan to the end of the runlist. */
	for (dend = 0; drl[dend].length; dend++)
	drl = ntfs_rl_realloc(drl, dend, dend + 1);
	/* Insert start element at the front of the runlist. */
	ntfs_rl_mm(drl, 1, 0, dend);
	/* The new head covers the initially unmapped region up to vcn 0. */
	drl[0].lcn = LCN_RL_NOT_MAPPED;
	drl[0].length = drl[1].vcn;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < (LCN)LCN_HOLE)

	/* Can't have an entirely unmapped source runlist. */
	if (!srl[si].length) {
		Dputs("Eeek! ntfs_runlists_merge() received entirely "
				"unmapped source runlist.");

	/* Record the starting points. */

	/*
	 * Skip forward in @drl until we reach the position where @srl needs
	 * to be inserted.  If we reach the end of @drl, @srl just needs to be
	 * appended.
	 */
	for (; drl[di].length; di++) {
		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		Dputs("Run lists overlap. Cannot merge!");

	/* Scan to the end of both runlists in order to know their sizes. */
	for (send = si; srl[send].length; send++)
	for (dend = di; drl[dend].length; dend++)

	/* Remember the terminator vcn so the marker code can restore it. */
	if (srl[send].lcn == (LCN)LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)

		int ds = dend + 1;	/* Number of elements in drl & srl */
		int ss = sfinal - sstart + 1;

		/* Classify where @srl lands relative to the hole at @dins. */
		start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) ||	   /* End of file   */
			(drl[dins].vcn == srl[sstart].vcn));	   /* Start of hole */
		finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&  /* End of file   */
			((drl[dins].vcn + drl[dins].length) <=	   /* End of hole   */
			(srl[send - 1].vcn + srl[send - 1].length)));

		/* Or we'll lose an end marker */
		if (start && finish && (drl[dins].length == 0))
		if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))

		Dprintf("dfinal = %i, dend = %i\n", dfinal, dend);
		Dprintf("sstart = %i, sfinal = %i, send = %i\n", sstart, sfinal, send);
		Dprintf("start = %i, finish = %i\n", start, finish);
		Dprintf("ds = %i, ss = %i, dins = %i\n", ds, ss, dins);

		/* The four merge strategies; the selection logic choosing
		 * between them is not present in this truncated copy. */
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
			Dprintf("%s(): Merge failed: %s\n", __FUNCTION__,

		Dputs("Triggering marker code.");
		for (ds = dend; drl[ds].length; ds++)
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			if (drl[ds].vcn == marker_vcn) {
				Dprintf("Old marker = %Li, replacing with "
						(long long)drl[ds].lcn);
				drl[ds].lcn = (LCN)LCN_ENOENT;
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == (LCN)LCN_ENOENT) {
			if (drl[ds].lcn != (LCN)LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
					/* FIXME/TODO: We need to have the
					 * extra memory already! (AIA) */
					drl = ntfs_rl_realloc(drl, ds, ds + 2);
				/* Need to set vcn if it isn't set already. */
					drl[ds].vcn = drl[ds - 1].vcn +
				drl[ds].lcn = (LCN)LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
				/* FIXME/TODO: We need to have the extra
				 * memory already! (AIA) */
				drl = ntfs_rl_realloc(drl, ds, ds + 1);
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = (LCN)LCN_ENOENT;
			drl[ds].length = (s64)0;

	/* The merge was completed successfully. */
	Dputs("Merged runlist:");
	ntfs_debug_runlist_dump(drl);

	/* Critical error!  We cannot afford to fail here. */
	Dperror("libntfs: Critical error");
	Dputs("Forcing segmentation fault!");
	/* Deliberate NULL dereference: abort the process hard on purpose. */
	marker_vcn = ((runlist*)NULL)->lcn;
/**
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol:	ntfs volume on which the attribute resides
 * @attr:	attribute record whose mapping pairs array to decompress
 * @old_rl:	optional runlist in which to insert @attr's runlist
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist.  On
 * success, return the decompressed runlist.
 *
 * If @old_rl is not NULL, decompressed runlist is inserted into the
 * appropriate place in @old_rl and the resultant, combined runlist is
 * returned.  The original @old_rl is deallocated.
 *
 * On error, return NULL with errno set to the error code.  @old_rl is left
 * unmodified in that case.
 *
 * The following error codes are defined:
 *	ENOMEM	Not enough memory to allocate runlist array.
 *	EIO	Corrupt runlist.
 *	EINVAL	Invalid parameters were passed in.
 *	ERANGE	The two runlists overlap.
 *
 * FIXME: For now we take the conceptionally simplest approach of creating
 * the new runlist disregarding the already existing one and then splicing
 * the two into one, if that is possible (we check for overlap and discard
 * the new runlist if overlap present before returning NULL, with
 * errno = ERANGE).
 *
 * NOTE(review): this copy of the function is visibly truncated — braces,
 * guards, error-path cleanup (free of @rl) and several assignments are
 * missing between the lines below.  Compare against the upstream
 * Linux-NTFS runlist.c before relying on it.
 */
runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
		const ATTR_RECORD *attr, runlist_element *old_rl)
	VCN vcn;		/* Current vcn. */
	LCN lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	runlist_element *rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of
				   runlist_elements. */
	u8 b;			/* Current byte offset in buf. */

	Dprintf("%s(): Entering for attr 0x%x.\n", __FUNCTION__,
			le32_to_cpu(attr->type));
	/* Make sure attr exists and is non-resident. */
	if (!attr || !attr->non_resident ||
			sle64_to_cpu(attr->lowest_vcn) < (VCN)0) {

	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = sle64_to_cpu(attr->lowest_vcn);

	/* Get start of the mapping pairs array. */
	buf = (u8*)attr + le16_to_cpu(attr->mapping_pairs_offset);
	attr_end = (u8*)attr + le32_to_cpu(attr->length);
	if (buf < (u8*)attr || buf > attr_end) {
		Dputs("Corrupt attribute.");

	/* Current position in runlist array. */
	/* Allocate first 4kiB block and set current runlist size to 4kiB. */
	rl = malloc(rlsize = 0x1000);

	/* Insert unmapped starting element if necessary. */
		rl->lcn = (LCN)LCN_RL_NOT_MAPPED;

	/* One iteration per mapping pair; a zero header byte terminates. */
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements.
		 */
		if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
			runlist_element *rl2;

			rl2 = realloc(rl, rlsize);

		/* Enter the current vcn into the current runlist element. */
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we signextend negative values.
		 * A negative run length doesn't make any sense, but hey, I
		 * didn't make up the NTFS specs and Windows NT4 treats the run
		 * length as a signed value so that's how it is...
		 */
			if (buf + b > attr_end)
			/* Little-endian: accumulate bytes high to low. */
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			Dputs("Missing length entry in mapping pairs array.");
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean-up and return NULL.
		 */
			Dputs("Invalid length in mapping pairs array.");
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/* Increment the current vcn by the current run length. */
		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
			rl[rlpos].lcn = (LCN)LCN_HOLE;
		/* Get the lcn change which really can be negative. */
			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to it's new value. */
			/*
			 * On NTFS 1.2-, apparently can have lcn == -1 to
			 * indicate a hole. But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1. So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (deltaxcn == (LCN)-1)
					Dputs("lcn delta == -1");

			/* Check lcn is not below -1. */
				Dputs("Invalid LCN < -1 in mapping pairs "
			/* Enter the current lcn into the runlist element. */
		/* Get to the next runlist element. */
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;

	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = sle64_to_cpu(attr->highest_vcn);
	if (deltaxcn && vcn - 1 != deltaxcn) {
		Dputs("Corrupt mapping pairs array in non-resident attribute.");

	/* Setup not mapped runlist element if this is the base extent. */
	if (!attr->lowest_vcn) {
		max_cluster = (sle64_to_cpu(attr->allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits;
		/*
		 * If there is a difference between the highest_vcn and the
		 * highest cluster, the runlist is either corrupt or, more
		 * likely, there are more extents following this one.
		 */
		if (deltaxcn < --max_cluster) {
			Dprintf("More extents to follow; deltaxcn = 0x%Lx, "
					"max_cluster = 0x%Lx\n",
					(long long)max_cluster);
			vcn += rl[rlpos].length = max_cluster - deltaxcn;
			rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;
		} else if (deltaxcn > max_cluster) {
			Dprintf("Corrupt attribute. deltaxcn = 0x%Lx, "
					"max_cluster = 0x%Lx",
					(long long)max_cluster);
		rl[rlpos].lcn = (LCN)LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;

	/* Setup terminating runlist element. */
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
		Dputs("Mapping pairs array successfully decompressed:");
		ntfs_debug_runlist_dump(rl);

	/* Now combine the new and old runlists checking for overlaps. */
	old_rl = ntfs_runlists_merge(old_rl, rl);
		Dputs("Failed to merge runlists.");
		Dputs("Corrupt attribute.");
936 * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
937 * @rl: runlist to use for conversion
938 * @vcn: vcn to convert
940 * Convert the virtual cluster number @vcn of an attribute into a logical
941 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
942 * corresponding lcns.
944 * Since lcns must be >= 0, we use negative return values with special meaning:
946 * Return value Meaning / Description
947 * ==================================================
948 * -1 = LCN_HOLE Hole / not allocated on disk.
949 * -2 = LCN_RL_NOT_MAPPED This is part of the runlist which has not been
950 * inserted into the runlist yet.
951 * -3 = LCN_ENOENT There is no such vcn in the attribute.
952 * -4 = LCN_EINVAL Input parameter error.
954 LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
959 return (LCN)LCN_EINVAL;
961 * If rl is NULL, assume that we have found an unmapped runlist. The
962 * caller can then attempt to map it and fail appropriately if
966 return (LCN)LCN_RL_NOT_MAPPED;
968 /* Catch out of lower bounds vcn. */
970 return (LCN)LCN_ENOENT;
972 for (i = 0; rl[i].length; i++) {
973 if (vcn < rl[i+1].vcn) {
974 if (rl[i].lcn >= (LCN)0)
975 return rl[i].lcn + (vcn - rl[i].vcn);
980 * The terminator element is setup to the correct value, i.e. one of
981 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
983 if (rl[i].lcn < (LCN)0)
985 /* Just in case... We could replace this with BUG() some day. */
986 return (LCN)LCN_ENOENT;
/**
 * ntfs_rl_pwrite - scatter write to disk
 * @vol:	ntfs volume to write to
 * @rl:		runlist specifying where to write the data to
 * @pos:	byte position within runlist @rl at which to begin the write
 * @count:	number of bytes to write
 * @b:		data buffer to write to disk
 *
 * This function will write @count bytes from data buffer @b to the volume
 * @vol scattering the data as specified by the runlist @rl.  The write
 * begins at offset @pos into the runlist @rl.
 *
 * On success, return the number of successfully written bytes.  If this
 * number is lower than @count this means that the write has been
 * interrupted in flight or that an error was encountered during the write
 * so that the write is partial.  0 means nothing was written (also return 0
 * when @count is 0).
 *
 * On error and nothing has been written, return -1 with errno set
 * appropriately to the return code of either lseek, write, fdatasync, or
 * set to EINVAL in case of invalid arguments.
 *
 * NOTE(review): this copy of the function is visibly truncated — loop
 * bodies, error returns and several local declarations (t, cnt, i, b2) are
 * missing between the lines below.  Compare against the upstream
 * Linux-NTFS runlist.c before relying on it.
 */
s64 ntfs_rl_pwrite(const ntfs_volume *vol, const runlist_element *rl,
		const s64 pos, s64 count, void *b)
	s64 written, to_write, ofs, total;

	if (!vol || !rl || pos < 0 || count < 0) {

	/* Seek in @rl to the run containing @pos. */
	for (ofs = 0; rl->length && (ofs + rl->length <= pos); rl++)
	/* Offset in the run at which to begin writing. */
	for (total = 0LL; count; rl++, ofs = 0) {
		if (rl->lcn < (LCN) 0) {
			/* Only holes may legitimately be "written" to. */
			if (rl->lcn != (LCN)LCN_HOLE)
			/*
			 * It is a hole. Check if the buffer is zero in this
			 * region and if not abort with error.
			 */
			to_write = min(count, (rl->length <<
					vol->cluster_size_bits) - ofs);
			/* Compare whole machine words first... */
			written = to_write / sizeof(unsigned long);
			for (t = 0; t < written; t++) {
				if (((unsigned long*)b)[t])
			/* ...then any trailing sub-word bytes. */
			cnt = to_write & (sizeof(unsigned long) - 1);
				b2 = (u8*)b + (to_write &
						~(sizeof(unsigned long) - 1));
				for (i = 0; i < cnt; i++) {
			/*
			 * The buffer region is zero, update progress counters
			 * and proceed with next run.
			 */
		/* It is a real lcn, write it to the volume. */
		to_write = min(count, (rl->length << vol->cluster_size_bits) -
		if (!NVolReadOnly(vol))
			written = ntfs_pwrite(vol->dev, (rl->lcn <<
					vol->cluster_size_bits) + ofs,
		/* If everything ok, update progress counters and continue. */
		/* If the syscall was interrupted, try again. */
		if (written == (s64)-1 && errno == EINTR)
	/* Finally, return the number of bytes written. */
1102 * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
1103 * @n: number for which to get the number of bytes for
1105 * Return the number of bytes required to store @n unambiguously as
1108 * This is used in the context of the mapping pairs array to determine how
1109 * many bytes will be needed in the array to store a given logical cluster
1110 * number (lcn) or a specific run length.
1112 * Return the number of bytes written. This function cannot fail.
1114 __inline__ int ntfs_get_nr_significant_bytes(const s64 n)
1124 } while (l != 0LL && l != -1LL);
1125 j = (n >> 8 * (i - 1)) & 0xff;
1126 /* If the sign bit is wrong, we need an extra byte. */
1127 if ((n < 0LL && j >= 0) || (n > 0LL && j < 0))
1133 * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
1134 * @vol: ntfs volume (needed for the ntfs version)
1135 * @rl: runlist for which to determine the size of the mapping pairs
1137 * Walk the runlist @rl and calculate the size in bytes of the mapping pairs
1138 * array corresponding to the runlist @rl. This for example allows us to
1139 * allocate a buffer of the right size when building the mapping pairs array.
1141 * Return the calculated size in bytes on success. If @rl is NULL return 0.
1142 * On error, return -1 with errno set to the error code. The following error
1143 * codes are defined:
1144 * EINVAL - Run list contains unmapped elements. Make sure to only pass
1145 * fully mapped runlists to this function.
1146 * EIO - The runlist is corrupt.
1148 int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
1149 const runlist_element *rl)
1156 /* Always need the termining zero byte. */
1158 for (prev_lcn = i = 0; rl[i].length; prev_lcn = rl[i++].lcn) {
1159 if (rl[i].length < 0 || rl[i].lcn < LCN_HOLE)
1161 /* Header byte + length. */
1162 rls += 1 + ntfs_get_nr_significant_bytes(rl[i].length);
1164 * If the logical cluster number (lcn) denotes a hole and we
1165 * are on NTFS 3.0+, we don't store it at all, i.e. we need
1166 * zero space. On earlier NTFS versions we just store the lcn.
1168 if (rl[i].lcn == LCN_HOLE && vol->major_ver >= 3)
1170 /* Change in lcn. */
1171 rls += ntfs_get_nr_significant_bytes(rl[i].lcn - prev_lcn);
1175 if (rl[i].lcn == LCN_RL_NOT_MAPPED)
1183 * ntfs_write_significant_bytes - write the significant bytes of a number
1184 * @dst: destination buffer to write to
1185 * @dst_max: pointer to last byte of destination buffer for bounds checking
1186 * @n: number whose significant bytes to write
1188 * Store in @dst, the minimum bytes of the number @n which are required to
1189 * identify @n unambiguously as a signed number, taking care not to exceed
1190 * @dst_max, the maximum position within @dst to which we are allowed to
1193 * This is used when building the mapping pairs array of a runlist to compress
1194 * a given logical cluster number (lcn) or a specific run length to the minimum
1197 * Return the number of bytes written on success. On error, i.e. the
1198 * destination buffer @dst is too small, return -1 with errno set ENOSPC.
1200 __inline__ int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
/*
 * Write the significant bytes of the number, least significant byte first,
 * into @dst, bounds-checked against @dst_max; returns the byte count or -1
 * with errno = ENOSPC (see the header comment above).
 * NOTE(review): listing lines 1201-1210, 1212-1213 and 1218-1234 are
 * absent from this excerpt (the remainder of the parameter list, local
 * declarations, the loop head, the extra-byte emission paths and the
 * return) -- consult the full runlist.c before editing.
 */
1211 *dst++ = l & 0xffLL;
1214 } while (l != 0LL && l != -1LL);
/* Loop ends once only sign extension remains: 0 (non-negative) or -1. */
1215 j = (n >> 8 * (i - 1)) & 0xff;
1216 /* If the sign bit is wrong, we need an extra byte. */
1217 if (n < 0LL && j >= 0) {
/* Negative number whose top stored byte reads non-negative: pad with an
   extra sign byte (and symmetrically in the branch below). */
1222 } else if (n > 0LL && j < 0) {
1235 * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
1236 * @vol: ntfs volume (needed for the ntfs version)
1237 * @dst: destination buffer to which to write the mapping pairs array
1238 * @dst_len: size of destination buffer @dst in bytes
1239 * @rl: runlist for which to build the mapping pairs array
1241 * Create the mapping pairs array from the runlist @rl and save the array in
1242 * @dst. @dst_len is the size of @dst in bytes and it should be at least equal
1243 * to the value obtained by calling ntfs_get_size_for_mapping_pairs().
1245 * Return 0 on success or when @rl is NULL. On error, return -1 with errno set
1246 * to the error code. The following error codes are defined:
1247 * EINVAL - Run list contains unmapped elements. Make sure to only pass
1248 * fully mapped runlists to this function.
1249 * EIO - The runlist is corrupt.
1250 * ENOSPC - The destination buffer is too small.
1252 int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
1253 const int dst_len, const runlist_element *rl)
/*
 * Serialize runlist @rl into the on-disk mapping pairs array at @dst,
 * writing at most @dst_len bytes; see the header comment above for the
 * error contract (EINVAL/EIO/ENOSPC).
 * NOTE(review): listing lines 1254-1257, 1259-1262, 1265, 1269-1270,
 * 1272-1275, 1282, 1286-1289, 1294 and 1297-1303 are absent from this
 * excerpt (local declarations, error exits, the terminator store and the
 * final return) -- consult the full runlist.c before editing.
 */
1258 s8 len_len, lcn_len;
1263 * @dst_max is used for bounds checking in
1264 * ntfs_write_significant_bytes().
1266 dst_max = dst + dst_len - 1;
1267 for (prev_lcn = i = 0; rl[i].length; prev_lcn = rl[i++].lcn) {
/* A negative run length or an lcn below LCN_HOLE marks corruption. */
1268 if (rl[i].length < 0 || rl[i].lcn < LCN_HOLE)
/* Write the run length just past the header byte we fill in below. */
1271 len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
1276 * If the logical cluster number (lcn) denotes a hole and we
1277 * are on NTFS 3.0+, we don't store it at all, i.e. we need
1278 * zero space. On earlier NTFS versions we just write the lcn
1279 * change. FIXME: Do we need to write the lcn change or just
1280 * the lcn in that case? Not sure as I have never seen this
1281 * case on NT4. (AIA)
1283 if (rl[i].lcn != LCN_HOLE || vol->major_ver < 3) {
/* Deltas, not absolute lcns, are stored -- hence rl[i].lcn - prev_lcn. */
1284 lcn_len = ntfs_write_significant_bytes(dst + 1 +
1285 len_len, dst_max, rl[i].lcn - prev_lcn);
1290 /* Update header byte. */
/* High nibble = lcn byte count, low nibble = length byte count. */
1291 *dst = lcn_len << 4 | len_len;
1292 /* Position ourselves at next mapping pairs array element. */
1293 dst += 1 + len_len + lcn_len;
1295 if (dst <= dst_max) {
1296 /* Terminator byte. */
/*
 * Past the loop: a zero-length element carrying LCN_RL_NOT_MAPPED means
 * the caller passed a runlist that is not fully mapped (EINVAL).
 */
1304 if (rl[i].lcn == LCN_RL_NOT_MAPPED)
1312 * ntfs_rl_truncate - truncate a runlist starting at a specified vcn
1313 * @arl: address of runlist to truncate
1314 * @start_vcn: first vcn which should be cut off
1316 * Truncate the runlist *@arl starting at vcn @start_vcn as well as the memory
1317 * buffer holding the runlist.
1319 * Return 0 on success and -1 on error with errno set to the error code.
1321 * NOTE: @arl is the address of the runlist. We need the address so we can
1322 * modify the pointer to the runlist with the new, reallocated memory buffer.
1324 int ntfs_rl_truncate(runlist **arl, const VCN start_vcn)
1329 if (!arl || !*arl) {
1336 if (start_vcn < rl->vcn) {
1337 // FIXME: Eeek! BUG()
1338 fprintf(stderr, "%s(): Eeek! start_vcn lies outside front of "
1339 "runlist! Aborting.\n", __FUNCTION__);
1344 /* Find the starting vcn in the run list. */
1345 while (rl->length) {
1346 if (start_vcn < rl[1].vcn)
1351 // FIXME: Weird, probably a BUG()!
1352 fprintf(stderr, "%s(): Weird! Asking to truncate already "
1353 "truncated runlist?!? Abort.\n", __FUNCTION__);
1357 if (start_vcn < rl->vcn) {
1358 // FIXME: Eeek! BUG()
1359 fprintf(stderr, "%s(): Eeek! start_vcn < rl->vcn! Aborting.\n",
1368 /* Truncate the run. */
1369 rl->length = start_vcn - rl->vcn;
1372 * If a run was partially truncated, make the following runlist
1373 * element a terminator instead of the truncated runlist
1380 rl->vcn = start_vcn;
1386 rl->lcn = (LCN)LCN_ENOENT;
1388 /* Reallocate memory if necessary. */
1390 rl = realloc(*arl, (rl - *arl + 1) * sizeof(runlist_element));
1395 fprintf(stderr, "%s(): Eeek! Failed to reallocate "
1396 "runlist buffer! Continuing "
1397 "regardless and returning success.\n",