
Previously, the word "type" was massively overloaded in objects.py. It could
refer to the numeric type of an object (obj.type or obj._num_type), the type
name of the object (obj._type or FOO_ID), or the actual class (Python type) of
the object. This could get quite confusing.

This change does the following:
- Replace obj._type and obj._num_type with type_name and type_num. (The type
  property is retained for client compatibility, but is marked as deprecated.)
  Change the various type maps and callers to use the object's public members
  as keys.
- Add a convenience function object_class that takes either a string or an int
  and dispatches to the appropriate type map.
- Rename the FOO_ID constants as _FOO_HEADER, since those constants were
  previously overloaded to mean both header field names and type names. There
  is some overlap, but this is intentional.
- Use isinstance for type comparisons rather than type, which is common Python
  practice and avoids the problematic word altogether.
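
For orientation, here is a condensed, illustrative sketch of the naming scheme described above. It is not the actual dulwich code (the real classes also carry parsing and serialization logic, and the deprecated type property has a setter as well), but it shows how type_name, type_num, object_class and isinstance fit together:

class ShaFile(object):
    """Base class for git objects (simplified)."""

    # DEPRECATED: retained only for client compatibility; prefer
    # type_name or type_num.
    type = property(lambda self: self.type_num)


class Blob(ShaFile):
    type_name = 'blob'   # type name as it appears in the object header
    type_num = 3         # numeric type as used in pack files


class Tree(ShaFile):
    type_name = 'tree'
    type_num = 2


# A single map keyed by both public members, so callers can look up the
# class from whichever form of the type they happen to have.
_TYPE_MAP = {}
for cls in (Blob, Tree):
    _TYPE_MAP[cls.type_name] = cls
    _TYPE_MAP[cls.type_num] = cls


def object_class(type):
    """Return the ShaFile subclass for a type name string or numeric type."""
    return _TYPE_MAP[type]


# Class checks use isinstance rather than the overloaded word "type".
assert object_class('tree') is Tree is object_class(2)
assert isinstance(Blob(), Blob)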

Changeset 684b4ba58ae9

Parent f58dc961489a

committed by Jelmer Vernooij

authored by Dave Borowitz

Changes to 8 files · Showing diff from parent f58dc961489a

dulwich/errors.py (Change 1 of 1)
 
@@ -37,40 +37,39 @@
 class WrongObjectException(Exception):
     """Baseclass for all the _ is not a _ exceptions on objects.
-
+
     Do not instantiate directly.
-
-    Subclasses should define a _type attribute that indicates what
+
+    Subclasses should define a type_name attribute that indicates what
     was expected if they were raised.
     """
-
+
     def __init__(self, sha, *args, **kwargs):
-        string = "%s is not a %s" % (sha, self._type)
-        Exception.__init__(self, string)
+        Exception.__init__(self, "%s is not a %s" % (sha, self.type_name))
 
 
 class NotCommitError(WrongObjectException):
     """Indicates that the sha requested does not point to a commit."""
-
-    _type = 'commit'
+
+    type_name = 'commit'
 
 
 class NotTreeError(WrongObjectException):
     """Indicates that the sha requested does not point to a tree."""
-
-    _type = 'tree'
+
+    type_name = 'tree'
 
 
 class NotTagError(WrongObjectException):
     """Indicates that the sha requested does not point to a tag."""
 
-    _type = 'tag'
+    type_name = 'tag'
 
 
 class NotBlobError(WrongObjectException):
     """Indicates that the sha requested does not point to a blob."""
-
-    _type = 'blob'
+
+    type_name = 'blob'
 
 
 class MissingCommitError(Exception):
 
@@ -92,14 +92,14 @@
         """Obtain the raw text for an object.
 
         :param name: sha for the object.
-        :return: tuple with object type and object contents.
+        :return: tuple with numeric type and object contents.
         """
         raise NotImplementedError(self.get_raw)
 
     def __getitem__(self, sha):
         """Obtain an object by SHA1."""
-        type, uncomp = self.get_raw(sha)
-        return ShaFile.from_raw_string(type, uncomp)
+        type_num, uncomp = self.get_raw(sha)
+        return ShaFile.from_raw_string(type_num, uncomp)
 
     def __iter__(self):
         """Iterate over the SHAs that are present in this store."""
@@ -284,9 +284,9 @@
 
     def get_raw(self, name):
         """Obtain the raw text for an object.
-
+
         :param name: sha for the object.
-        :return: tuple with object type and object contents.
+        :return: tuple with numeric type and object contents.
         """
         if len(name) == 40:
             sha = hex_to_sha(name)
@@ -305,7 +305,7 @@
         hexsha = sha_to_hex(name)
         ret = self._get_loose_object(hexsha)
         if ret is not None:
-            return ret.type, ret.as_raw_string()
+            return ret.type_num, ret.as_raw_string()
         raise KeyError(hexsha)
 
     def add_objects(self, objects):
@@ -503,9 +503,9 @@
 
     def get_raw(self, name):
         """Obtain the raw text for an object.
-
+
         :param name: sha for the object.
-        :return: tuple with object type and object contents.
+        :return: tuple with numeric type and object contents.
         """
         return self[name].as_raw_string()
 
@@ -621,7 +621,7 @@
     mode = None
     for p in parts:
         obj = lookup_obj(sha)
-        if type(obj) is not Tree:
+        if not isinstance(obj, Tree):
             raise NotTreeError(sha)
         if p == '':
             continue
dulwich/objects.py (Change 1 of 21)
 
@@ -41,22 +41,27 @@
     make_sha,
     )
 
-BLOB_ID = "blob"
-TAG_ID = "tag"
-TREE_ID = "tree"
-COMMIT_ID = "commit"
-PARENT_ID = "parent"
-AUTHOR_ID = "author"
-COMMITTER_ID = "committer"
-OBJECT_ID = "object"
-TYPE_ID = "type"
-TAGGER_ID = "tagger"
-ENCODING_ID = "encoding"
+
+# Header fields for commits
+_TREE_HEADER = "tree"
+_PARENT_HEADER = "parent"
+_AUTHOR_HEADER = "author"
+_COMMITTER_HEADER = "committer"
+_ENCODING_HEADER = "encoding"
+
+
+# Header fields for objects
+_OBJECT_HEADER = "object"
+_TYPE_HEADER = "type"
+_TAG_HEADER = "tag"
+_TAGGER_HEADER = "tagger"
+
 
 S_IFGITLINK = 0160000
 
 def S_ISGITLINK(m):
     return (stat.S_IFMT(m) == S_IFGITLINK)
+
 
 def _decompress(string):
     dcomp = zlib.decompressobj()
@@ -89,6 +94,15 @@
     return property(get, set, doc=docstring)
 
 
+def object_class(type):
+    """Get the object class corresponding to the given type.
+
+    :param type: Either a type name string or a numeric type.
+    :return: The ShaFile subclass corresponding to the given type.
+    """
+    return _TYPE_MAP[type]
+
+
 class ShaFile(object):
     """A git SHA file."""
 
@@ -97,10 +111,10 @@
         """Parse a legacy object, creating it and setting object._text"""
         text = _decompress(map)
         object = None
-        for posstype in type_map.keys():
-            if text.startswith(posstype):
-                object = type_map[posstype]()
-                text = text[len(posstype):]
+        for cls in OBJECT_CLASSES:
+            if text.startswith(cls.type_name):
+                object = cls()
+                text = text[len(cls.type_name):]
                 break
         assert object is not None, "%s is not a known object type" % text[:9]
         assert text[0] == ' ', "%s is not a space" % text[0]
@@ -121,7 +135,7 @@
 
     def as_legacy_object(self):
         text = self.as_raw_string()
-        return zlib.compress("%s %d\0%s" % (self._type, len(text), text))
+        return zlib.compress("%s %d\0%s" % (self.type_name, len(text), text))
 
     def as_raw_chunks(self):
         if self._needs_serialization:
@@ -163,11 +177,11 @@
         used = 0
         byte = ord(map[used])
         used += 1
-        num_type = (byte >> 4) & 7
+        type_num = (byte >> 4) & 7
         try:
-            object = num_type_map[num_type]()
+            object = object_class(type_num)()
         except KeyError:
-            raise AssertionError("Not a known type: %d" % num_type)
+            raise AssertionError("Not a known type: %d" % type_num)
         while (byte & 0x80) != 0:
             byte = ord(map[used])
             used += 1
@@ -205,41 +219,37 @@
         finally:
             f.close()
 
-    @classmethod
-    def from_raw_string(cls, type, string):
+    @staticmethod
+    def from_raw_string(type_num, string):
         """Creates an object of the indicated type from the raw string given.
 
-        Type is the numeric type of an object. String is the raw uncompressed
-        contents.
+        :param type_num: The numeric type of the object.
+        :param string: The raw uncompressed contents.
         """
-        real_class = num_type_map[type]
-        obj = real_class()
-        obj.type = type
+        obj = object_class(type_num)()
        obj.set_raw_string(string)
         return obj
 
-    @classmethod
-    def from_raw_chunks(cls, type, chunks):
+    @staticmethod
+    def from_raw_chunks(type_num, chunks):
         """Creates an object of the indicated type from the raw chunks given.
 
-        Type is the numeric type of an object. Chunks is a sequence of the raw
-        uncompressed contents.
+        :param type_num: The numeric type of the object.
+        :param chunks: An iterable of the raw uncompressed contents.
         """
-        real_class = num_type_map[type]
-        obj = real_class()
-        obj.type = type
+        obj = object_class(type_num)()
         obj.set_raw_chunks(chunks)
         return obj
 
     @classmethod
     def from_string(cls, string):
         """Create a blob from a string."""
-        shafile = cls()
-        shafile.set_raw_string(string)
-        return shafile
+        obj = cls()
+        obj.set_raw_string(string)
+        return obj
 
     def _header(self):
-        return "%s %lu\0" % (self._type, self.raw_length())
+        return "%s %lu\0" % (self.type_name, self.raw_length())
 
     def raw_length(self):
         """Returns the length of the raw string of this object."""
@@ -266,11 +276,12 @@
         return self.sha().hexdigest()
 
     def get_type(self):
-        return self._num_type
+        return self.type_num
 
     def set_type(self, type):
-        self._num_type = type
-
+        self.type_num = type
+
+    # DEPRECATED: use type_num or type_name as needed.
     type = property(get_type, set_type)
 
     def __repr__(self):
@@ -291,8 +302,8 @@
 class Blob(ShaFile):
     """A Git Blob object."""
 
-    _type = BLOB_ID
-    _num_type = 3
+    type_name = 'blob'
+    type_num = 3
 
     def __init__(self):
         super(Blob, self).__init__()
@@ -321,7 +332,7 @@
     @classmethod
     def from_file(cls, filename):
         blob = ShaFile.from_file(filename)
-        if blob._type != cls._type:
+        if not isinstance(blob, cls):
             raise NotBlobError(filename)
         return blob
 
@@ -329,8 +340,8 @@
 class Tag(ShaFile):
     """A Git Tag object."""
 
-    _type = TAG_ID
-    _num_type = 4
+    type_name = 'tag'
+    type_num = 4
 
     def __init__(self):
         super(Tag, self).__init__()
@@ -339,10 +350,10 @@
 
     @classmethod
     def from_file(cls, filename):
-        blob = ShaFile.from_file(filename)
-        if blob._type != cls._type:
-            raise NotBlobError(filename)
-        return blob
+        tag = ShaFile.from_file(filename)
+        if not isinstance(tag, cls):
+            raise NotTagError(filename)
+        return tag
 
     @classmethod
     def from_string(cls, string):
@@ -353,14 +364,16 @@
 
     def _serialize(self):
         chunks = []
-        chunks.append("%s %s\n" % (OBJECT_ID, self._object_sha))
-        chunks.append("%s %s\n" % (TYPE_ID, num_type_map[self._object_type]._type))
-        chunks.append("%s %s\n" % (TAG_ID, self._name))
+        chunks.append("%s %s\n" % (_OBJECT_HEADER, self._object_sha))
+        chunks.append("%s %s\n" % (_TYPE_HEADER, self._object_class.type_name))
+        chunks.append("%s %s\n" % (_TAG_HEADER, self._name))
         if self._tagger:
             if self._tag_time is None:
-                chunks.append("%s %s\n" % (TAGGER_ID, self._tagger))
+                chunks.append("%s %s\n" % (_TAGGER_HEADER, self._tagger))
             else:
-                chunks.append("%s %s %d %s\n" % (TAGGER_ID, self._tagger, self._tag_time, format_timezone(self._tag_timezone)))
+                chunks.append("%s %s %d %s\n" % (
+                    _TAGGER_HEADER, self._tagger, self._tag_time,
+                    format_timezone(self._tag_timezone)))
         chunks.append("\n") # To close headers
         chunks.append(self._message)
         return chunks
@@ -374,13 +387,13 @@
             if l == "":
                 break # empty line indicates end of headers
             (field, value) = l.split(" ", 1)
-            if field == OBJECT_ID:
+            if field == _OBJECT_HEADER:
                 self._object_sha = value
-            elif field == TYPE_ID:
-                self._object_type = type_map[value]
-            elif field == TAG_ID:
+            elif field == _TYPE_HEADER:
+                self._object_class = object_class(value)
+            elif field == _TAG_HEADER:
                 self._name = value
-            elif field == TAGGER_ID:
+            elif field == _TAGGER_HEADER:
                 try:
                     sep = value.index("> ")
                 except ValueError:
@@ -400,13 +413,16 @@
             self._message = f.read()
 
     def _get_object(self):
-        """Returns the object pointed by this tag, represented as a tuple(type, sha)"""
-        self._ensure_parsed()
-        return (self._object_type, self._object_sha)
+        """Get the object pointed to by this tag.
+
+        :return: tuple of (object class, sha).
+        """
+        self._ensure_parsed()
+        return (self._object_class, self._object_sha)
 
     def _set_object(self, value):
         self._ensure_parsed()
-        (self._object_type, self._object_sha) = value
+        (self._object_class, self._object_sha) = value
         self._needs_serialization = True
 
     object = property(_get_object, _set_object)
@@ -471,8 +487,8 @@
 class Tree(ShaFile):
     """A Git tree object"""
 
-    _type = TREE_ID
-    _num_type = 2
+    type_name = 'tree'
+    type_num = 2
 
     def __init__(self):
         super(Tree, self).__init__()
@@ -483,7 +499,7 @@
     @classmethod
     def from_file(cls, filename):
         tree = ShaFile.from_file(filename)
-        if tree._type != cls._type:
+        if not isinstance(tree, cls):
             raise NotTreeError(filename)
         return tree
 
@@ -574,8 +590,8 @@
 class Commit(ShaFile):
     """A git commit object"""
 
-    _type = COMMIT_ID
-    _num_type = 1
+    type_name = 'commit'
+    type_num = 1
 
     def __init__(self):
         super(Commit, self).__init__()
@@ -588,7 +604,7 @@
     @classmethod
     def from_file(cls, filename):
         commit = ShaFile.from_file(filename)
-        if commit._type != cls._type:
+        if not isinstance(commit, cls):
             raise NotCommitError(filename)
         return commit
 
@@ -603,19 +619,19 @@
                 # Empty line indicates end of headers
                 break
             (field, value) = l.split(" ", 1)
-            if field == TREE_ID:
+            if field == _TREE_HEADER:
                 self._tree = value
-            elif field == PARENT_ID:
+            elif field == _PARENT_HEADER:
                 self._parents.append(value)
-            elif field == AUTHOR_ID:
+            elif field == _AUTHOR_HEADER:
                 self._author, timetext, timezonetext = value.rsplit(" ", 2)
                 self._author_time = int(timetext)
                 self._author_timezone = parse_timezone(timezonetext)
-            elif field == COMMITTER_ID:
+            elif field == _COMMITTER_HEADER:
                 self._committer, timetext, timezonetext = value.rsplit(" ", 2)
                 self._commit_time = int(timetext)
                 self._commit_timezone = parse_timezone(timezonetext)
-            elif field == ENCODING_ID:
+            elif field == _ENCODING_HEADER:
                 self._encoding = value
             else:
                 self._extra.append((field, value))
@@ -623,13 +639,17 @@
 
     def _serialize(self):
         chunks = []
-        chunks.append("%s %s\n" % (TREE_ID, self._tree))
+        chunks.append("%s %s\n" % (_TREE_HEADER, self._tree))
         for p in self._parents:
-            chunks.append("%s %s\n" % (PARENT_ID, p))
-        chunks.append("%s %s %s %s\n" % (AUTHOR_ID, self._author, str(self._author_time), format_timezone(self._author_timezone)))
-        chunks.append("%s %s %s %s\n" % (COMMITTER_ID, self._committer, str(self._commit_time), format_timezone(self._commit_timezone)))
+            chunks.append("%s %s\n" % (_PARENT_HEADER, p))
+        chunks.append("%s %s %s %s\n" % (
+            _AUTHOR_HEADER, self._author, str(self._author_time),
+            format_timezone(self._author_timezone)))
+        chunks.append("%s %s %s %s\n" % (
+            _COMMITTER_HEADER, self._committer, str(self._commit_time),
+            format_timezone(self._commit_timezone)))
         if self.encoding:
-            chunks.append("%s %s\n" % (ENCODING_ID, self.encoding))
+            chunks.append("%s %s\n" % (_ENCODING_HEADER, self.encoding))
         for k, v in self.extra:
             if "\n" in k or "\n" in v:
                 raise AssertionError("newline in extra data: %r -> %r" % (k, v))
@@ -685,21 +705,19 @@
         "Encoding of the commit message.")
 
 
-type_map = {
-    BLOB_ID : Blob,
-    TREE_ID : Tree,
-    COMMIT_ID : Commit,
-    TAG_ID: Tag,
-}
-
-num_type_map = {
-    0: None,
-    1: Commit,
-    2: Tree,
-    3: Blob,
-    4: Tag,
-    # 5 Is reserved for further expansion
-}
+OBJECT_CLASSES = (
+    Commit,
+    Tree,
+    Blob,
+    Tag,
+    )
+
+_TYPE_MAP = {}
+
+for cls in OBJECT_CLASSES:
+    _TYPE_MAP[cls.type_name] = cls
+    _TYPE_MAP[cls.type_num] = cls
+
 
 try:
     # Try to import C versions
dulwich/pack.py (Change 1 of 2)
 
@@ -833,7 +833,7 @@
     # This helps us find good objects to diff against us
     magic = []
     for obj, path in recency:
-        magic.append( (obj.type, path, 1, -obj.raw_length(), obj) )
+        magic.append( (obj.type_num, path, 1, -obj.raw_length(), obj) )
     magic.sort()
     # Build a map of objects and their index in magic - so we can find preceeding objects
     # to diff against
@@ -848,14 +848,14 @@
     f.write(struct.pack(">L", num_objects)) # Number of objects in pack
     for o, path in recency:
         sha1 = o.sha().digest()
-        orig_t = o.type
+        orig_t = o.type_num
         raw = o.as_raw_string()
         winner = raw
         t = orig_t
         #for i in range(offs[o]-window, window):
         #    if i < 0 or i >= len(offs): continue
         #    b = magic[i][4]
-        #    if b.type != orig_t: continue
+        #    if b.type_num != orig_t: continue
         #    base = b.as_raw_string()
         #    delta = create_delta(base, raw)
         #    if len(delta) < len(winner):
dulwich/repo.py (Change 1 of 4)
 # repo.py -- For dealing wih git repositories.  # Copyright (C) 2007 James Westby <jw+debian@jameswestby.net>  # Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>  #  # This program is free software; you can redistribute it and/or  # modify it under the terms of the GNU General Public License  # as published by the Free Software Foundation; version 2  # of the License or (at your option) any later version of  # the License.  #  # This program is distributed in the hope that it will be useful,  # but WITHOUT ANY WARRANTY; without even the implied warranty of  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the  # GNU General Public License for more details.  #  # You should have received a copy of the GNU General Public License  # along with this program; if not, write to the Free Software  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,  # MA 02110-1301, USA.      """Repository access."""      import errno  import os    from dulwich.errors import (   MissingCommitError,   NoIndexPresent,   NotBlobError,   NotCommitError,   NotGitRepository,   NotTreeError,   NotTagError,   PackedRefsException,   )  from dulwich.file import (   ensure_dir_exists,   GitFile,   )  from dulwich.object_store import (   DiskObjectStore,   )  from dulwich.objects import (   Blob,   Commit,   ShaFile,   Tag,   Tree,   hex_to_sha, - num_type_map, + object_class,   )  import warnings      OBJECTDIR = 'objects'  SYMREF = 'ref: '  REFSDIR = 'refs'  REFSDIR_TAGS = 'tags'  REFSDIR_HEADS = 'heads'  INDEX_FILENAME = "index"    BASE_DIRECTORIES = [   [OBJECTDIR],   [OBJECTDIR, "info"],   [OBJECTDIR, "pack"],   ["branches"],   [REFSDIR],   [REFSDIR, REFSDIR_TAGS],   [REFSDIR, REFSDIR_HEADS],   ["hooks"],   ["info"]   ]      def read_info_refs(f):   ret = {}   for l in f.readlines():   (sha, name) = l.rstrip("\n").split("\t", 1)   ret[name] = sha   return ret      def check_ref_format(refname):   """Check if a refname is correctly formatted.     Implements all the same rules as git-check-ref-format[1].     [1] http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html     :param refname: The refname to check   :return: True if refname is valid, False otherwise   """   # These could be combined into one big expression, but are listed separately   # to parallel [1].   if '/.' in refname or refname.startswith('.'):   return False   if '/' not in refname:   return False   if '..' in refname:   return False   for c in refname:   if ord(c) < 040 or c in '\177 ~^:?*[':   return False   if refname[-1] in '/.':   return False   if refname.endswith('.lock'):   return False   if '@{' in refname:   return False   if '\\' in refname:   return False   return True      class RefsContainer(object):   """A container for refs."""     def set_ref(self, name, other):   """Make a ref point at another ref.     :param name: Name of the ref to set   :param other: Name of the ref to point at   """   self[name] = SYMREF + other + '\n'     def get_packed_refs(self):   """Get contents of the packed-refs file.     :return: Dictionary mapping ref names to SHA1s     :note: Will return an empty dictionary when no packed-refs file is   present.   """   raise NotImplementedError(self.get_packed_refs)     def get_peeled(self, name):   """Return the cached peeled value of a ref, if available.     :param name: Name of the ref to peel   :return: The peeled value of the ref. If the ref is known not point to a   tag, this will be the SHA the ref refers to. 
If the ref may point to   a tag, but no cached information is available, None is returned.   """   return None     def import_refs(self, base, other):   for name, value in other.iteritems():   self["%s/%s" % (base, name)] = value     def keys(self, base=None):   """Refs present in this container.     :param base: An optional base to return refs under   :return: An unsorted set of valid refs in this container, including   packed refs.   """   if base is not None:   return self.subkeys(base)   else:   return self.allkeys()     def subkeys(self, base):   keys = set()   for refname in self.allkeys():   if refname.startswith(base):   keys.add(refname)   return keys     def as_dict(self, base=None):   """Return the contents of this container as a dictionary.     """   ret = {}   keys = self.keys(base)   if base is None:   base = ""   for key in keys:   try:   ret[key] = self[("%s/%s" % (base, key)).strip("/")]   except KeyError:   continue # Unable to resolve     return ret     def _check_refname(self, name):   """Ensure a refname is valid and lives in refs or is HEAD.     HEAD is not a valid refname according to git-check-ref-format, but this   class needs to be able to touch HEAD. Also, check_ref_format expects   refnames without the leading 'refs/', but this class requires that   so it cannot touch anything outside the refs dir (or HEAD).     :param name: The name of the reference.   :raises KeyError: if a refname is not HEAD or is otherwise not valid.   """   if name == 'HEAD':   return   if not name.startswith('refs/') or not check_ref_format(name[5:]):   raise KeyError(name)     def read_loose_ref(self, name):   """Read a loose reference and return its contents.     :param name: the refname to read   :return: The contents of the ref file, or None if it does   not exist.   """   raise NotImplementedError(self.read_loose_ref)     def _follow(self, name):   """Follow a reference name.     :return: a tuple of (refname, sha), where refname is the name of the   last reference in the symbolic reference chain   """   self._check_refname(name)   contents = SYMREF + name   depth = 0   while contents.startswith(SYMREF):   refname = contents[len(SYMREF):]   contents = self.read_loose_ref(refname)   if not contents:   contents = self.get_packed_refs().get(refname, None)   if not contents:   break   depth += 1   if depth > 5:   raise KeyError(name)   return refname, contents     def __contains__(self, refname):   if self.read_loose_ref(refname):   return True   if self.get_packed_refs().get(refname, None):   return True   return False     def __getitem__(self, name):   """Get the SHA1 for a reference name.     This method follows all symbolic references.   
"""   _, sha = self._follow(name)   if sha is None:   raise KeyError(name)   return sha      class DictRefsContainer(RefsContainer):     def __init__(self, refs):   self._refs = refs     def allkeys(self):   return self._refs.keys()     def read_loose_ref(self, name):   return self._refs[name]      class DiskRefsContainer(RefsContainer):   """Refs container that reads refs from disk."""     def __init__(self, path):   self.path = path   self._packed_refs = None   self._peeled_refs = None     def __repr__(self):   return "%s(%r)" % (self.__class__.__name__, self.path)     def subkeys(self, base):   keys = set()   path = self.refpath(base)   for root, dirs, files in os.walk(path):   dir = root[len(path):].strip(os.path.sep).replace(os.path.sep, "/")   for filename in files:   refname = ("%s/%s" % (dir, filename)).strip("/")   # check_ref_format requires at least one /, so we prepend the   # base before calling it.   if check_ref_format("%s/%s" % (base, refname)):   keys.add(refname)   for key in self.get_packed_refs():   if key.startswith(base):   keys.add(key[len(base):].strip("/"))   return keys     def allkeys(self):   keys = set()   if os.path.exists(self.refpath("HEAD")):   keys.add("HEAD")   path = self.refpath("")   for root, dirs, files in os.walk(self.refpath("refs")):   dir = root[len(path):].strip(os.path.sep).replace(os.path.sep, "/")   for filename in files:   refname = ("%s/%s" % (dir, filename)).strip("/")   if check_ref_format(refname):   keys.add(refname)   keys.update(self.get_packed_refs())   return keys     def refpath(self, name):   """Return the disk path of a ref.     """   if os.path.sep != "/":   name = name.replace("/", os.path.sep)   return os.path.join(self.path, name)     def get_packed_refs(self):   """Get contents of the packed-refs file.     :return: Dictionary mapping ref names to SHA1s     :note: Will return an empty dictionary when no packed-refs file is   present.   """   # TODO: invalidate the cache on repacking   if self._packed_refs is None:   self._packed_refs = {}   path = os.path.join(self.path, 'packed-refs')   try:   f = GitFile(path, 'rb')   except IOError, e:   if e.errno == errno.ENOENT:   return {}   raise   try:   first_line = iter(f).next().rstrip()   if (first_line.startswith("# pack-refs") and " peeled" in   first_line):   self._peeled_refs = {}   for sha, name, peeled in read_packed_refs_with_peeled(f):   self._packed_refs[name] = sha   if peeled:   self._peeled_refs[name] = peeled   else:   f.seek(0)   for sha, name in read_packed_refs(f):   self._packed_refs[name] = sha   finally:   f.close()   return self._packed_refs     def get_peeled(self, name):   """Return the cached peeled value of a ref, if available.     :param name: Name of the ref to peel   :return: The peeled value of the ref. If the ref is known not point to a   tag, this will be the SHA the ref refers to. If the ref may point to   a tag, but no cached information is available, None is returned.   """   self.get_packed_refs()   if self._peeled_refs is None or name not in self._packed_refs:   # No cache: no peeled refs were read, or this ref is loose   return None   if name in self._peeled_refs:   return self._peeled_refs[name]   else:   # Known not peelable   return self[name]     def read_loose_ref(self, name):   """Read a reference file and return its contents.     If the reference file a symbolic reference, only read the first line of   the file. Otherwise, only read the first 40 bytes.     
:param name: the refname to read, relative to refpath   :return: The contents of the ref file, or None if the file does not   exist.   :raises IOError: if any other error occurs   """   filename = self.refpath(name)   try:   f = GitFile(filename, 'rb')   try:   header = f.read(len(SYMREF))   if header == SYMREF:   # Read only the first line   return header + iter(f).next().rstrip("\n")   else:   # Read only the first 40 bytes   return header + f.read(40-len(SYMREF))   finally:   f.close()   except IOError, e:   if e.errno == errno.ENOENT:   return None   raise     def _remove_packed_ref(self, name):   if self._packed_refs is None:   return   filename = os.path.join(self.path, 'packed-refs')   # reread cached refs from disk, while holding the lock   f = GitFile(filename, 'wb')   try:   self._packed_refs = None   self.get_packed_refs()     if name not in self._packed_refs:   return     del self._packed_refs[name]   if name in self._peeled_refs:   del self._peeled_refs[name]   write_packed_refs(f, self._packed_refs, self._peeled_refs)   f.close()   finally:   f.abort()     def set_if_equals(self, name, old_ref, new_ref):   """Set a refname to new_ref only if it currently equals old_ref.     This method follows all symbolic references, and can be used to perform   an atomic compare-and-swap operation.     :param name: The refname to set.   :param old_ref: The old sha the refname must refer to, or None to set   unconditionally.   :param new_ref: The new sha the refname will refer to.   :return: True if the set was successful, False otherwise.   """   try:   realname, _ = self._follow(name)   except KeyError:   realname = name   filename = self.refpath(realname)   ensure_dir_exists(os.path.dirname(filename))   f = GitFile(filename, 'wb')   try:   if old_ref is not None:   try:   # read again while holding the lock   orig_ref = self.read_loose_ref(realname)   if orig_ref is None:   orig_ref = self.get_packed_refs().get(realname, None)   if orig_ref != old_ref:   f.abort()   return False   except (OSError, IOError):   f.abort()   raise   try:   f.write(new_ref+"\n")   except (OSError, IOError):   f.abort()   raise   finally:   f.close()   return True     def add_if_new(self, name, ref):   """Add a new reference only if it does not already exist."""   self._check_refname(name)   filename = self.refpath(name)   ensure_dir_exists(os.path.dirname(filename))   f = GitFile(filename, 'wb')   try:   if os.path.exists(filename) or name in self.get_packed_refs():   f.abort()   return False   try:   f.write(ref+"\n")   except (OSError, IOError):   f.abort()   raise   finally:   f.close()   return True     def __setitem__(self, name, ref):   """Set a reference name to point to the given SHA1.     This method follows all symbolic references.     :note: This method unconditionally overwrites the contents of a reference   on disk. To update atomically only if the reference has not changed   on disk, use set_if_equals().   """   self.set_if_equals(name, None, ref)     def remove_if_equals(self, name, old_ref):   """Remove a refname only if it currently equals old_ref.     This method does not follow symbolic references. It can be used to   perform an atomic compare-and-delete operation.     :param name: The refname to delete.   :param old_ref: The old sha the refname must refer to, or None to delete   unconditionally.   :return: True if the delete was successful, False otherwise.   
"""   self._check_refname(name)   filename = self.refpath(name)   ensure_dir_exists(os.path.dirname(filename))   f = GitFile(filename, 'wb')   try:   if old_ref is not None:   orig_ref = self.read_loose_ref(name)   if orig_ref is None:   orig_ref = self.get_packed_refs().get(name, None)   if orig_ref != old_ref:   return False   # may only be packed   try:   os.remove(filename)   except OSError, e:   if e.errno != errno.ENOENT:   raise   self._remove_packed_ref(name)   finally:   # never write, we just wanted the lock   f.abort()   return True     def __delitem__(self, name):   """Remove a refname.     This method does not follow symbolic references.   :note: This method unconditionally deletes the contents of a reference   on disk. To delete atomically only if the reference has not changed   on disk, use set_if_equals().   """   self.remove_if_equals(name, None)      def _split_ref_line(line):   """Split a single ref line into a tuple of SHA1 and name."""   fields = line.rstrip("\n").split(" ")   if len(fields) != 2:   raise PackedRefsException("invalid ref line '%s'" % line)   sha, name = fields   try:   hex_to_sha(sha)   except (AssertionError, TypeError), e:   raise PackedRefsException(e)   if not check_ref_format(name):   raise PackedRefsException("invalid ref name '%s'" % name)   return (sha, name)      def read_packed_refs(f):   """Read a packed refs file.     Yields tuples with SHA1s and ref names.     :param f: file-like object to read from   """   for l in f:   if l[0] == "#":   # Comment   continue   if l[0] == "^":   raise PackedRefsException(   "found peeled ref in packed-refs without peeled")   yield _split_ref_line(l)      def read_packed_refs_with_peeled(f):   """Read a packed refs file including peeled refs.     Assumes the "# pack-refs with: peeled" line was already read. Yields tuples   with ref names, SHA1s, and peeled SHA1s (or None).     :param f: file-like object to read from, seek'ed to the second line   """   last = None   for l in f:   if l[0] == "#":   continue   l = l.rstrip("\n")   if l[0] == "^":   if not last:   raise PackedRefsException("unexpected peeled ref line")   try:   hex_to_sha(l[1:])   except (AssertionError, TypeError), e:   raise PackedRefsException(e)   sha, name = _split_ref_line(last)   last = None   yield (sha, name, l[1:])   else:   if last:   sha, name = _split_ref_line(last)   yield (sha, name, None)   last = l   if last:   sha, name = _split_ref_line(last)   yield (sha, name, None)      def write_packed_refs(f, packed_refs, peeled_refs=None):   """Write a packed refs file.     :param f: empty file-like object to write to   :param packed_refs: dict of refname to sha of packed refs to write   :param peeled_refs: dict of refname to peeled value of sha   """   if peeled_refs is None:   peeled_refs = {}   else:   f.write('# pack-refs with: peeled\n')   for refname in sorted(packed_refs.iterkeys()):   f.write('%s %s\n' % (packed_refs[refname], refname))   if refname in peeled_refs:   f.write('^%s\n' % peeled_refs[refname])      class BaseRepo(object):   """Base class for a git repository.     :ivar object_store: Dictionary-like object for accessing   the objects   :ivar refs: Dictionary-like object with the refs in this repository   """     def __init__(self, object_store, refs):   self.object_store = object_store   self.refs = refs     def get_named_file(self, path):   """Get a file from the control dir with a specific name.     
Although the filename should be interpreted as a filename relative to   the control dir in a disk-baked Repo, the object returned need not be   pointing to a file in that location.     :param path: The path to the file, relative to the control dir.   :return: An open file object, or None if the file does not exist.   """   raise NotImplementedError(self.get_named_file)     def open_index(self):   """Open the index for this repository.     :raises NoIndexPresent: If no index is present   :return: Index instance   """   raise NotImplementedError(self.open_index)     def fetch(self, target, determine_wants=None, progress=None):   """Fetch objects into another repository.     :param target: The target repository   :param determine_wants: Optional function to determine what refs to   fetch.   :param progress: Optional progress function   """   if determine_wants is None:   determine_wants = lambda heads: heads.values()   target.object_store.add_objects(   self.fetch_objects(determine_wants, target.get_graph_walker(),   progress))   return self.get_refs()     def fetch_objects(self, determine_wants, graph_walker, progress,   get_tagged=None):   """Fetch the missing objects required for a set of revisions.     :param determine_wants: Function that takes a dictionary with heads   and returns the list of heads to fetch.   :param graph_walker: Object that can iterate over the list of revisions   to fetch and has an "ack" method that will be called to acknowledge   that a revision is present.   :param progress: Simple progress function that will be called with   updated progress strings.   :param get_tagged: Function that returns a dict of pointed-to sha -> tag   sha for including tags.   :return: iterator over objects, with __len__ implemented   """   wants = determine_wants(self.get_refs())   haves = self.object_store.find_common_revisions(graph_walker)   return self.object_store.iter_shas(   self.object_store.find_missing_objects(haves, wants, progress,   get_tagged))     def get_graph_walker(self, heads=None):   if heads is None:   heads = self.refs.as_dict('refs/heads').values()   return self.object_store.get_graph_walker(heads)     def ref(self, name):   """Return the SHA1 a ref is pointing to."""   return self.refs[name]     def get_refs(self):   """Get dictionary with all refs."""   return self.refs.as_dict()     def head(self):   """Return the SHA1 pointed at by HEAD."""   return self.refs['HEAD']     def _get_object(self, sha, cls):   assert len(sha) in (20, 40)   ret = self.get_object(sha) - if ret._type != cls._type: + if not isinstance(ret, cls):   if cls is Commit:   raise NotCommitError(ret)   elif cls is Blob:   raise NotBlobError(ret)   elif cls is Tree:   raise NotTreeError(ret)   elif cls is Tag:   raise NotTagError(ret)   else: - raise Exception("Type invalid: %r != %r" % (ret._type, cls._type)) + raise Exception("Type invalid: %r != %r" % ( + ret.type_name, cls.type_name))   return ret     def get_object(self, sha):   return self.object_store[sha]     def get_parents(self, sha):   return self.commit(sha).parents     def get_config(self):   import ConfigParser   p = ConfigParser.RawConfigParser()   p.read(os.path.join(self._controldir, 'config'))   return dict((section, dict(p.items(section)))   for section in p.sections())     def commit(self, sha):   """Retrieve the commit with a particular SHA.     
:param sha: SHA of the commit to retrieve   :raise NotCommitError: If the SHA provided doesn't point at a Commit   :raise KeyError: If the SHA provided didn't exist   :return: A `Commit` object   """   warnings.warn("Repo.commit(sha) is deprecated. Use Repo[sha] instead.",   category=DeprecationWarning, stacklevel=2)   return self._get_object(sha, Commit)     def tree(self, sha):   """Retrieve the tree with a particular SHA.     :param sha: SHA of the tree to retrieve   :raise NotTreeError: If the SHA provided doesn't point at a Tree   :raise KeyError: If the SHA provided didn't exist   :return: A `Tree` object   """   warnings.warn("Repo.tree(sha) is deprecated. Use Repo[sha] instead.",   category=DeprecationWarning, stacklevel=2)   return self._get_object(sha, Tree)     def tag(self, sha):   """Retrieve the tag with a particular SHA.     :param sha: SHA of the tag to retrieve   :raise NotTagError: If the SHA provided doesn't point at a Tag   :raise KeyError: If the SHA provided didn't exist   :return: A `Tag` object   """   warnings.warn("Repo.tag(sha) is deprecated. Use Repo[sha] instead.",   category=DeprecationWarning, stacklevel=2)   return self._get_object(sha, Tag)     def get_blob(self, sha):   """Retrieve the blob with a particular SHA.     :param sha: SHA of the blob to retrieve   :raise NotBlobError: If the SHA provided doesn't point at a Blob   :raise KeyError: If the SHA provided didn't exist   :return: A `Blob` object   """   warnings.warn("Repo.get_blob(sha) is deprecated. Use Repo[sha] "   "instead.", category=DeprecationWarning, stacklevel=2)   return self._get_object(sha, Blob)     def get_peeled(self, ref):   """Get the peeled value of a ref.     :param ref: the refname to peel   :return: the fully-peeled SHA1 of a tag object, after peeling all   intermediate tags; if the original ref does not point to a tag, this   will equal the original SHA1.   """   cached = self.refs.get_peeled(ref)   if cached is not None:   return cached   obj = self[ref] - obj_type = num_type_map[obj.type] - while obj_type == Tag: - obj_type, sha = obj.object + obj_class = object_class(obj.type_name) + while obj_class is Tag: + obj_class, sha = obj.object   obj = self.get_object(sha)   return obj.id     def revision_history(self, head):   """Returns a list of the commits reachable from head.     Returns a list of commit objects. the first of which will be the commit   of head, then following theat will be the parents.     Raises NotCommitError if any no commits are referenced, including if the   head parameter isn't the sha of a commit.     XXX: work out how to handle merges.   
"""   # We build the list backwards, as parents are more likely to be older   # than children   pending_commits = [head]   history = []   while pending_commits != []:   head = pending_commits.pop(0)   try:   commit = self[head]   except KeyError:   raise MissingCommitError(head)   if type(commit) != Commit:   raise NotCommitError(commit)   if commit in history:   continue   i = 0   for known_commit in history:   if known_commit.commit_time > commit.commit_time:   break   i += 1   history.insert(i, commit)   pending_commits += commit.parents   history.reverse()   return history     def __getitem__(self, name):   if len(name) in (20, 40):   return self.object_store[name]   return self.object_store[self.refs[name]]     def __setitem__(self, name, value):   if name.startswith("refs/") or name == "HEAD":   if isinstance(value, ShaFile):   self.refs[name] = value.id   elif isinstance(value, str):   self.refs[name] = value   else:   raise TypeError(value)   else:   raise ValueError(name)     def __delitem__(self, name):   if name.startswith("refs") or name == "HEAD":   del self.refs[name]   raise ValueError(name)     def do_commit(self, message, committer=None,   author=None, commit_timestamp=None,   commit_timezone=None, author_timestamp=None,   author_timezone=None, tree=None):   """Create a new commit.     :param message: Commit message   :param committer: Committer fullname   :param author: Author fullname (defaults to committer)   :param commit_timestamp: Commit timestamp (defaults to now)   :param commit_timezone: Commit timestamp timezone (defaults to GMT)   :param author_timestamp: Author timestamp (defaults to commit timestamp)   :param author_timezone: Author timestamp timezone   (defaults to commit timestamp timezone)   :param tree: SHA1 of the tree root to use (if not specified the current index will be committed).   
:return: New commit SHA1   """   import time   index = self.open_index()   c = Commit()   if tree is None:   c.tree = index.commit(self.object_store)   else:   c.tree = tree   # TODO: Allow username to be missing, and get it from .git/config   if committer is None:   raise ValueError("committer not set")   c.committer = committer   if commit_timestamp is None:   commit_timestamp = time.time()   c.commit_time = int(commit_timestamp)   if commit_timezone is None:   # FIXME: Use current user timezone rather than UTC   commit_timezone = 0   c.commit_timezone = commit_timezone   if author is None:   author = committer   c.author = author   if author_timestamp is None:   author_timestamp = commit_timestamp   c.author_time = int(author_timestamp)   if author_timezone is None:   author_timezone = commit_timezone   c.author_timezone = author_timezone   c.message = message   self.object_store.add_object(c)   self.refs["HEAD"] = c.id   return c.id      class Repo(BaseRepo):   """A git repository backed by local disk."""     def __init__(self, root):   if os.path.isdir(os.path.join(root, ".git", OBJECTDIR)):   self.bare = False   self._controldir = os.path.join(root, ".git")   elif (os.path.isdir(os.path.join(root, OBJECTDIR)) and   os.path.isdir(os.path.join(root, REFSDIR))):   self.bare = True   self._controldir = root   else:   raise NotGitRepository(root)   self.path = root   object_store = DiskObjectStore(   os.path.join(self.controldir(), OBJECTDIR))   refs = DiskRefsContainer(self.controldir())   BaseRepo.__init__(self, object_store, refs)     def controldir(self):   """Return the path of the control directory."""   return self._controldir     def _put_named_file(self, path, contents):   """Write a file from the control dir with a specific name and contents.   """   f = GitFile(os.path.join(self.controldir(), path), 'wb')   try:   f.write(contents)   finally:   f.close()     def get_named_file(self, path):   """Get a file from the control dir with a specific name.     Although the filename should be interpreted as a filename relative to   the control dir in a disk-baked Repo, the object returned need not be   pointing to a file in that location.     :param path: The path to the file, relative to the control dir.   :return: An open file object, or None if the file does not exist.   """   try:   return open(os.path.join(self.controldir(), path.lstrip('/')), 'rb')   except (IOError, OSError), e:   if e.errno == errno.ENOENT:   return None   raise     def index_path(self):   """Return path to the index file."""   return os.path.join(self.controldir(), INDEX_FILENAME)     def open_index(self):   """Open the index for this repository."""   from dulwich.index import Index   if not self.has_index():   raise NoIndexPresent()   return Index(self.index_path())     def has_index(self):   """Check if an index is present."""   return os.path.exists(self.index_path())     def stage(self, paths):   """Stage a set of paths.     :param paths: List of paths, relative to the repository path   """   from dulwich.index import cleanup_mode   index = self.open_index()   for path in paths:   blob = Blob()   try:   st = os.stat(path)   except OSError:   # File no longer exists   del index[path]   else:   f = open(path, 'rb')   try:   blob.data = f.read()   finally:   f.close()   self.object_store.add_object(blob)   # XXX: Cleanup some of the other file properties as well?   
index[path] = (st.st_ctime, st.st_mtime, st.st_dev, st.st_ino,   cleanup_mode(st.st_mode), st.st_uid, st.st_gid, st.st_size,   blob.id, 0)   index.write()     def __repr__(self):   return "<Repo at %r>" % self.path     @classmethod   def init(cls, path, mkdir=True):   controldir = os.path.join(path, ".git")   os.mkdir(controldir)   cls.init_bare(controldir)   return cls(path)     @classmethod   def init_bare(cls, path, mkdir=True):   for d in BASE_DIRECTORIES:   os.mkdir(os.path.join(path, *d))   ret = cls(path)   ret.refs.set_ref("HEAD", "refs/heads/master")   ret._put_named_file('description', "Unnamed repository")   ret._put_named_file('config', """[core]   repositoryformatversion = 0   filemode = true   bare = false   logallrefupdates = true  """)   ret._put_named_file(os.path.join('info', 'excludes'), '')   return ret     create = init_bare
 
@@ -183,13 +183,13 @@
         """Tests random access for non-delta objects"""
         p = self.get_pack(pack1_sha)
         obj = p[a_sha]
-        self.assertEqual(obj._type, 'blob')
+        self.assertEqual(obj.type_name, 'blob')
         self.assertEqual(obj.sha().hexdigest(), a_sha)
         obj = p[tree_sha]
-        self.assertEqual(obj._type, 'tree')
+        self.assertEqual(obj.type_name, 'tree')
         self.assertEqual(obj.sha().hexdigest(), tree_sha)
         obj = p[commit_sha]
-        self.assertEqual(obj._type, 'commit')
+        self.assertEqual(obj.type_name, 'commit')
         self.assertEqual(obj.sha().hexdigest(), commit_sha)
 
     def test_copy(self):
@@ -285,4 +285,3 @@
     def test_simple_decompress(self):
         self.assertEquals((["tree 4ada885c9196b6b6fa08744b5862bf92896fc002\nparent None\nauthor Jelmer Vernooij <jelmer@samba.org> 1228980214 +0000\ncommitter Jelmer Vernooij <jelmer@samba.org> 1228980214 +0000\n\nProvide replacement for mmap()'s offset argument."], 158, 'Z'),
             read_zlib_chunks(StringIO(TEST_COMP1).read, 229))
-
 
@@ -92,16 +92,16 @@
     def test_head(self):
         r = self._repo = open_repo('a.git')
         self.assertEqual(r.head(), 'a90fa2d900a17e99b433217e988c4eb4a2e9a097')
-
+
     def test_get_object(self):
         r = self._repo = open_repo('a.git')
        obj = r.get_object(r.head())
-        self.assertEqual(obj._type, 'commit')
-
+        self.assertEqual(obj.type_name, 'commit')
+
     def test_get_object_non_existant(self):
         r = self._repo = open_repo('a.git')
         self.assertRaises(KeyError, r.get_object, missing_sha)
-
+
     def test_commit(self):
         r = self._repo = open_repo('a.git')
         warnings.simplefilter("ignore", DeprecationWarning)
@@ -109,8 +109,8 @@
             obj = r.commit(r.head())
         finally:
             warnings.resetwarnings()
-        self.assertEqual(obj._type, 'commit')
-
+        self.assertEqual(obj.type_name, 'commit')
+
     def test_commit_not_commit(self):
         r = self._repo = open_repo('a.git')
         warnings.simplefilter("ignore", DeprecationWarning)
@@ -119,7 +119,7 @@
                 r.commit, '4f2e6529203aa6d44b5af6e3292c837ceda003f9')
         finally:
             warnings.resetwarnings()
-
+
     def test_tree(self):
         r = self._repo = open_repo('a.git')
         commit = r[r.head()]
@@ -128,9 +128,9 @@
             tree = r.tree(commit.tree)
         finally:
             warnings.resetwarnings()
-        self.assertEqual(tree._type, 'tree')
+        self.assertEqual(tree.type_name, 'tree')
         self.assertEqual(tree.sha().hexdigest(), commit.tree)
-
+
     def test_tree_not_tree(self):
         r = self._repo = open_repo('a.git')
         warnings.simplefilter("ignore", DeprecationWarning)
@@ -147,10 +147,10 @@
             tag = r.tag(tag_sha)
         finally:
             warnings.resetwarnings()
-        self.assertEqual(tag._type, 'tag')
+        self.assertEqual(tag.type_name, 'tag')
         self.assertEqual(tag.sha().hexdigest(), tag_sha)
-        obj_type, obj_sha = tag.object
-        self.assertEqual(obj_type, objects.Commit)
+        obj_class, obj_sha = tag.object
+        self.assertEqual(obj_class, objects.Commit)
         self.assertEqual(obj_sha, r.head())
 
     def test_tag_not_tag(self):
@@ -190,9 +190,9 @@
             blob = r.get_blob(blob_sha)
         finally:
             warnings.resetwarnings()
-        self.assertEqual(blob._type, 'blob')
+        self.assertEqual(blob.type_name, 'blob')
         self.assertEqual(blob.sha().hexdigest(), blob_sha)
-
+
     def test_get_blob_notblob(self):
         r = self._repo = open_repo('a.git')
         warnings.simplefilter("ignore", DeprecationWarning)
 
@@ -96,15 +96,11 @@
         self._environ['QUERY_STRING'] = ''
 
         class TestTag(object):
-            type = Tag().type
-
-            def __init__(self, sha, obj_type, obj_sha):
+            def __init__(self, sha, obj_class, obj_sha):
                 self.sha = lambda: sha
-                self.object = (obj_type, obj_sha)
+                self.object = (obj_class, obj_sha)
 
         class TestBlob(object):
-            type = Blob().type
-
             def __init__(self, sha):
                 self.sha = lambda: sha
 
@@ -112,7 +108,7 @@
         blob2 = TestBlob('222')
         blob3 = TestBlob('333')
 
-        tag1 = TestTag('aaa', TestBlob.type, '222')
+        tag1 = TestTag('aaa', Blob, '222')
 
         class TestRepo(object):
             def __init__(self, objects, peeled):