
Commit a243827

Committed Jun 4, 2010

Implemented stream tests and found a bug along the way; a test framework for streams is slowly taking shape, but it's not quite there yet.

1 parent: 6fbb693

File tree: 5 files changed, +490 -251 lines
 

lib/git/odb/fun.py (+12 -5)
@@ -83,26 +83,33 @@ def write_object(type, size, read, write, chunk_size=chunk_size):
     :param size: amount of bytes to write from source_stream
     :param read: read method of a stream providing the content data
     :param write: write method of the output stream
-    :param close_target_stream: if True, the target stream will be closed when
+    :param close_target_stream: if True, the target stream will be closed when
        the routine exits, even if an error is thrown
    :return: The actual amount of bytes written to stream, which includes the header and a trailing newline"""
    tbw = 0                             # total num bytes written
-   dbw = 0                             # num data bytes written
 
    # WRITE HEADER: type SP size NULL
    tbw += write("%s %i\0" % (type, size))
+   tbw += stream_copy(read, write, size, chunk_size)
+
+   return tbw
 
+def stream_copy(read, write, size, chunk_size):
+   """Copy a stream up to size bytes using the provided read and write methods,
+   in chunks of chunk_size
+   :note: it's much like the stream_copy utility, but operates just using methods"""
+   dbw = 0                             # num data bytes written
+
    # WRITE ALL DATA UP TO SIZE
    while True:
        cs = min(chunk_size, size-dbw)
        data_len = write(read(cs))
        dbw += data_len
        if data_len < cs or dbw == size:
-           tbw += dbw
            break
        # END check for stream end
    # END duplicate data
-   return tbw
-
+   return dbw
+
 
 #} END routines
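For orientation, a minimal usage sketch of the refactored pair — the import path and the in-memory streams are assumptions for illustration; `write_object` and the newly extracted `stream_copy` are the functions shown above:

```python
from cStringIO import StringIO
from git.odb.fun import write_object   # assumed import path within this tree

data = "hello"
src, dst = StringIO(data), StringIO()

# write_object emits the loose-object layout "<type> <size>\0" and then
# delegates the content copy to stream_copy
total = write_object("blob", len(data), src.read, dst.write)

assert dst.getvalue() == "blob 5\0hello"
assert total == len("blob 5\0hello")    # header bytes + content bytes
```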

lib/git/odb/stream.py (+40 -26)
@@ -75,7 +75,7 @@ def is_compressed(self):
        """:return: True if reads of this stream yield zlib compressed data. Default False
        :note: this does not imply anything about the actual internal storage.
        Hence the data could be uncompressed, but read compressed, or vice versa"""
-       raise False
+       return False
 
    #} END interface
 
@@ -105,10 +105,12 @@ def __init__(self, type, size, stream, sha=None, compressed=None):
 
    #{ Interface
 
+   @property
    def hexsha(self):
        """:return: our sha, hex encoded, 40 bytes"""
        return to_hex_sha(self[0])
-
+
+   @property
    def binsha(self):
        """:return: our sha as binary, 20 bytes"""
        return to_bin_sha(self[0])
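With the new `@property` decorators the sha accessors read as attributes rather than methods; a sketch mirroring the assertions the new tests make further down (imports assumed from this tree):

```python
from git import Blob
from git.odb import IStream

istream = IStream(Blob.type, 20, None)   # no stream needed for this check
istream.sha = Blob.NULL_HEX_SHA
assert len(istream.hexsha) == 40         # hex encoded
assert len(istream.binsha) == 20         # raw bytes
```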
@@ -229,10 +231,11 @@ class DecompressMemMapReader(object):
    and decompress it into chunks, that's all ..."""
    __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close')
 
-   max_read_size = 512*1024
+   max_read_size = 512*1024            # currently unused
 
    def __init__(self, m, close_on_deletion, size):
-       """Initialize with mmap for stream reading"""
+       """Initialize with mmap for stream reading
+       :param m: must be content data - use new if you have object data and no size"""
        self._m = m
        self._zip = zlib.decompressobj()
        self._buf = None                # buffer of decompressed bytes
@@ -248,32 +251,38 @@ def __del__(self):
            self._m.close()
        # END handle resource freeing
 
-   @classmethod
-   def new(self, m, close_on_deletion=False):
-       """Create a new DecompressMemMapReader instance for acting as a read-only stream
-       This method parses the object header from m and returns the parsed
-       type and size, as well as the created stream instance.
-       :param m: memory map on which to operate
-       :param close_on_deletion: if True, the memory map will be closed once we are
-           being deleted"""
-       inst = DecompressMemMapReader(m, close_on_deletion, 0)
-
+   def _parse_header_info(self):
+       """If this stream contains object data, parse the header info and skip the
+       stream to a point where each read will yield object content
+       :return: parsed type_string, size"""
        # read header
        maxb = 512                      # should really be enough, cgit uses 8192 I believe
-       inst._s = maxb
-       hdr = inst.read(maxb)
+       self._s = maxb
+       hdr = self.read(maxb)
        hdrend = hdr.find("\0")
        type, size = hdr[:hdrend].split(" ")
        size = int(size)
-       inst._s = size
+       self._s = size
 
        # adjust internal state to match actual header length that we ignore
        # The buffer will be depleted first on future reads
-       inst._br = 0
+       self._br = 0
        hdrend += 1                     # count terminating \0
-       inst._buf = StringIO(hdr[hdrend:])
-       inst._buflen = len(hdr) - hdrend
+       self._buf = StringIO(hdr[hdrend:])
+       self._buflen = len(hdr) - hdrend
+
+       return type, size
 
+   @classmethod
+   def new(self, m, close_on_deletion=False):
+       """Create a new DecompressMemMapReader instance for acting as a read-only stream
+       This method parses the object header from m and returns the parsed
+       type and size, as well as the created stream instance.
+       :param m: memory map on which to operate. It must be object data ( header + contents )
+       :param close_on_deletion: if True, the memory map will be closed once we are
+           being deleted"""
+       inst = DecompressMemMapReader(m, close_on_deletion, 0)
+       type, size = inst._parse_header_info()
        return type, size, inst
 
    def read(self, size=-1):
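The header parsed here is the same `"<type> <size>\0"` prefix that `write_object` emits. A self-contained sketch of just the parse step on a plain string (the class itself reads from its zlib-decompressed window):

```python
hdr = "blob 1137\0<first content bytes>"
hdrend = hdr.find("\0")
type, size = hdr[:hdrend].split(" ")
assert (type, int(size)) == ("blob", 1137)
# everything past the NUL is already content; _parse_header_info keeps it
# in an internal buffer so the next read() returns it first
```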
@@ -355,17 +364,22 @@ def read(self, size=-1):
            # needs to be as large as the uncompressed bytes we want to read.
            self._cws = self._cwe - len(tail)
            self._cwe = self._cws + size
-
-
-           indata = self._m[self._cws:self._cwe]       # another copy ... :(
-           # get the actual window end to be sure we don't use it for computations
-           self._cwe = self._cws + len(indata)
        else:
            cws = self._cws
            self._cws = self._cwe
            self._cwe = cws + size
-           indata = self._m[self._cws:self._cwe]       # ... copy it again :(
        # END handle tail
+
+
+       # if window is too small, make it larger so zip can decompress something
+       win_size = self._cwe - self._cws
+       if win_size < 8:
+           self._cwe = self._cws + 8
+       # END adjust winsize
+       indata = self._m[self._cws:self._cwe]           # another copy ... :(
+
+       # get the actual window end to be sure we don't use it for computations
+       self._cwe = self._cws + len(indata)
 
        dcompdat = self._zip.decompress(indata, size)
test/git/performance/test_streams.py (+2 -11)
@@ -3,13 +3,11 @@
 from test.testlib import *
 from git.odb import *
 
-from array import array
 from cStringIO import StringIO
 from time import time
 import os
 import sys
 import stat
-import random
 import subprocess
 

@@ -18,18 +16,11 @@
    )
 
 
-
 def make_memory_file(size_in_bytes, randomize=False):
    """:return: tuple(size_of_stream, stream)
    :param randomize: try to produce a very random stream"""
-   actual_size = size_in_bytes / 4
-   producer = xrange(actual_size)
-   if randomize:
-       producer = list(producer)
-       random.shuffle(producer)
-   # END randomize
-   a = array('i', producer)
-   return actual_size*4, StringIO(a.tostring())
+   d = make_bytes(size_in_bytes, randomize)
+   return len(d), StringIO(d)
 
 
 class TestObjDBPerformance(TestBigRepoR):
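`make_memory_file` now simply wraps the shared `make_bytes` helper (added to test/testlib/helper.py further down), so the performance tests and the new stream tests generate identical payloads; usage is unchanged:

```python
size, stream = make_memory_file(1000 * 1024)
assert size == len(stream.getvalue())    # reported size matches the payload
data = stream.read(4096)                 # behaves like any file-like object
```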

test/git/test_odb.py (+200 -2)
@@ -1,14 +1,210 @@
 """Test for object db"""
-
 from test.testlib import *
 from git.odb import *
+from git.odb.utils import (
+   to_hex_sha,
+   to_bin_sha
+   )
 from git.odb.stream import Sha1Writer
 from git import Blob
 from git.errors import BadObject
-
 from cStringIO import StringIO
+import tempfile
 import os
+import zlib
+
+
+#{ Stream Utilities
+
+class DummyStream(object):
+   def __init__(self):
+       self.was_read = False
+       self.bytes = 0
+       self.closed = False
+
+   def read(self, size):
+       self.was_read = True
+       self.bytes = size
+
+   def close(self):
+       self.closed = True
+
+   def _assert(self):
+       assert self.was_read
+
+class DeriveTest(OStream):
+   def __init__(self, sha, type, size, stream, *args, **kwargs):
+       self.myarg = kwargs.pop('myarg')
+       self.args = args
+
+   def _assert(self):
+       assert self.args
+       assert self.myarg
+
+#} END stream utilities
+
+
 
+class TestStream(TestBase):
+   """Test stream classes"""
+
+   data_sizes = (15, 10000, 1000*1024+512)
+
+   def test_streams(self):
+       # test info
+       sha = Blob.NULL_HEX_SHA
+       s = 20
+       info = OInfo(sha, Blob.type, s)
+       assert info.sha == sha
+       assert info.type == Blob.type
+       assert info.size == s
+
+       # test ostream
+       stream = DummyStream()
+       ostream = OStream(*(info + (stream, )))
+       ostream.read(15)
+       stream._assert()
+       assert stream.bytes == 15
+       ostream.read(20)
+       assert stream.bytes == 20
+
+       # defaults false
+       assert not ostream.is_compressed()
+
+       # derive with own args
+       DeriveTest(sha, Blob.type, s, stream, 'mine', myarg=3)._assert()
+
+       # test istream
+       istream = IStream(Blob.type, s, stream)
+       assert not istream.is_compressed()
+       assert istream.sha == None
+       istream.sha = sha
+       assert istream.sha == sha
+
+       assert len(istream.binsha) == 20
+       assert len(istream.hexsha) == 40
+
+       assert istream.size == s
+       istream.size = s * 2
+       assert istream.size == s * 2
+       assert istream.type == Blob.type
+       istream.type = "something"
+       assert istream.type == "something"
+       assert istream.stream is stream
+       istream.stream = None
+       assert istream.stream is None
+
+   def _assert_stream_reader(self, stream, cdata, rewind_stream=lambda s: None):
+       """Make stream tests - the orig_stream is seekable, allowing it to be
+       rewound and reused
+       :param cdata: the data we expect to read from stream, the contents
+       :param rewind_stream: function called to rewind the stream to make it ready
+           for reuse"""
+       ns = 10
+       assert len(cdata) > ns-1, "Data must be larger than %i, was %i" % (ns, len(cdata))
+
+       # read in small steps
+       ss = len(cdata) / ns
+       for i in range(ns):
+           data = stream.read(ss)
+           chunk = cdata[i*ss:(i+1)*ss]
+           assert data == chunk
+       # END for each step
+       rest = stream.read()
+       if rest:
+           assert rest == cdata[-len(rest):]
+       # END handle rest
+
+       rewind_stream(stream)
+
+       # read everything
+       rdata = stream.read()
+       assert rdata == cdata
+
+   def test_decompress_reader(self):
+       for close_on_deletion in range(2):
+           for with_size in range(2):
+               for ds in self.data_sizes:
+                   cdata = make_bytes(ds, randomize=False)
+
+                   # zdata = zipped actual data
+                   # cdata = original content data
+
+                   # create reader
+                   if with_size:
+                       # need object data
+                       zdata = zlib.compress(make_object(Blob.type, cdata))
+                       type, size, reader = DecompressMemMapReader.new(zdata, close_on_deletion)
+                       assert size == len(cdata)
+                       assert type == Blob.type
+                   else:
+                       # here we need content data
+                       zdata = zlib.compress(cdata)
+                       reader = DecompressMemMapReader(zdata, close_on_deletion, len(cdata))
+                       assert reader._s == len(cdata)
+                   # END get reader
+
+                   def rewind(r):
+                       r._zip = zlib.decompressobj()
+                       r._br = r._cws = r._cwe = 0
+                       if with_size:
+                           r._parse_header_info()
+                       # END skip header
+                   # END make rewind func
+
+                   self._assert_stream_reader(reader, cdata, rewind)
+
+                   # put in a dummy stream for closing
+                   dummy = DummyStream()
+                   reader._m = dummy
+
+                   assert not dummy.closed
+                   del(reader)
+                   assert dummy.closed == close_on_deletion
+               # END for each datasize
+           # END whether size should be used
+       # END whether stream should be closed when deleted
+
+   def test_sha_writer(self):
+       writer = Sha1Writer()
+       assert 2 == writer.write("hi")
+       assert len(writer.sha(as_hex=1)) == 40
+       assert len(writer.sha(as_hex=0)) == 20
+
+       # make sure it does something ;)
+       prev_sha = writer.sha()
+       writer.write("hi again")
+       assert writer.sha() != prev_sha
+
+   def test_compressed_writer(self):
+       for ds in self.data_sizes:
+           fd, path = tempfile.mkstemp()
+           ostream = FDCompressedSha1Writer(fd)
+           data = make_bytes(ds, randomize=False)
+
+           # for now, just a single write, code doesn't care about chunking
+           assert len(data) == ostream.write(data)
+           ostream.close()
+           # it's closed already
+           self.failUnlessRaises(OSError, os.close, fd)
+
+           # read everything back, compare to data we zip
+           fd = os.open(path, os.O_RDONLY)
+           written_data = os.read(fd, os.path.getsize(path))
+           os.close(fd)
+           assert written_data == zlib.compress(data, 1)   # best speed
+
+           os.remove(path)
+       # END for each data size
+
+
+class TestUtils(TestBase):
+   def test_basics(self):
+       assert to_hex_sha(Blob.NULL_HEX_SHA) == Blob.NULL_HEX_SHA
+       assert len(to_bin_sha(Blob.NULL_HEX_SHA)) == 20
+       assert to_hex_sha(to_bin_sha(Blob.NULL_HEX_SHA)) == Blob.NULL_HEX_SHA
+
 
 class TestDB(TestBase):
    """Test the different db class implementations"""
@@ -35,6 +231,8 @@ def _assert_object_writing(self, db):
        assert type(prev_ostream) in ostreams or prev_ostream in ostreams
 
        istream = IStream(Blob.type, len(data), StringIO(data))
+
+       # store returns same istream instance, with new sha set
        my_istream = db.store(istream)
        sha = istream.sha
        assert my_istream is istream
test/testlib/helper.py (+236 -207)
@@ -9,227 +9,256 @@
 from unittest import TestCase
 import tempfile
 import shutil
+import random
+from array import array
 import cStringIO
 
 GIT_REPO = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

The bulk of this hunk is a whitespace-only re-indentation: fixture_path, fixture, absolute_project_path, StringProcessAdapter, _rmtree_onerror, with_bare_rw_repo, with_rw_repo, with_rw_and_rw_remote_repo, and TestBase are removed and re-added with textually identical lines in this capture, regrouped under new section markers (#{ Routines / #} END routines, #{ Adapters / #} END adapters, #{ Decorators / #} END decorators). The only substantive additions are two helper routines in the new Routines section:

+def make_bytes(size_in_bytes, randomize=False):
+   """:return: string with given size in bytes
+   :param randomize: try to produce a very random stream"""
+   actual_size = size_in_bytes / 4
+   producer = xrange(actual_size)
+   if randomize:
+       producer = list(producer)
+       random.shuffle(producer)
+   # END randomize
+   a = array('i', producer)
+   return a.tostring()
+
+def make_object(type, data):
+   """:return: bytes resembling an uncompressed object"""
+   odata = "blob %i\0" % len(data)
+   return odata + data
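A quick sanity check of the two new helpers. Note that, as committed, `make_object` ignores its `type` argument and always writes a "blob" header, and `make_bytes` rounds the size down to a multiple of the int width:

```python
d = make_bytes(1024)                     # 1024 is divisible by 4
assert len(d) == 1024                    # on the usual 4-byte int platforms

obj = make_object("blob", d)
assert obj.startswith("blob 1024\0")     # "<type> <size>\0" + content
assert obj[obj.find("\0") + 1:] == d
```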
