DAV_HEADER_TIMEOUT = (1u << 2)
};
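+/*
+ * Escape a string for embedding in the body of an XML request: the
+ * characters ", <, > and & are replaced with their entity references.
+ * Returns a newly allocated string that the caller must free.
+ */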
+static char *xml_entities(char *s)
+{
+ struct strbuf buf = STRBUF_INIT;
+ while (*s) {
+ size_t len = strcspn(s, "\"<>&");
+ strbuf_add(&buf, s, len);
+ s += len;
+ switch (*s) {
+ case '"':
+ strbuf_addstr(&buf, "&quot;");
+ break;
+ case '<':
+ strbuf_addstr(&buf, "&lt;");
+ break;
+ case '>':
+ strbuf_addstr(&buf, "&gt;");
+ break;
+ case '&':
+ strbuf_addstr(&buf, "&amp;");
+ break;
+ }
+ s++;
+ }
+ return strbuf_detach(&buf, NULL);
+}
+
static struct curl_slist *get_dav_token_headers(struct remote_lock *lock, enum dav_header_flag options)
{
struct strbuf buf = STRBUF_INIT;
struct transfer_request *request = (struct transfer_request *)data;
do {
ssize_t retval = xwrite(request->local_fileno,
- (char *) ptr + posn, size - posn);
+ (char *) ptr + posn, size - posn);
if (retval < 0)
return posn;
posn += retval;
request->stream.avail_out = sizeof(expn);
request->zret = git_inflate(&request->stream, Z_SYNC_FLUSH);
git_SHA1_Update(&request->c, expn,
- sizeof(expn) - request->stream.avail_out);
+ sizeof(expn) - request->stream.avail_out);
} while (request->stream.avail_in && request->zret == Z_OK);
data_received++;
return size;
"%s.temp", filename);
snprintf(prevfile, sizeof(prevfile), "%s.prev", request->filename);
- unlink(prevfile);
+ unlink_or_warn(prevfile);
rename(request->tmpfile, prevfile);
- unlink(request->tmpfile);
+ unlink_or_warn(request->tmpfile);
if (request->local_fileno != -1)
error("fd leakage in start: %d", request->local_fileno);
request->local_fileno = open(request->tmpfile,
O_WRONLY | O_CREAT | O_EXCL, 0666);
- /* This could have failed due to the "lazy directory creation";
+ /*
+ * This could have failed due to the "lazy directory creation";
* try to mkdir the last path component.
*/
if (request->local_fileno < 0 && errno == ENOENT) {
url = get_remote_object_url(repo->url, hex, 0);
request->url = xstrdup(url);
- /* If a previous temp file is present, process what was already
- fetched. */
+ /*
+ * If a previous temp file is present, process what was already
+ * fetched.
+ */
prevlocal = open(prevfile, O_RDONLY);
if (prevlocal != -1) {
do {
if (fwrite_sha1_file(prev_buf,
1,
prev_read,
- request) == prev_read) {
+ request) == prev_read)
prev_posn += prev_read;
- } else {
+ else
prev_read = -1;
- }
}
} while (prev_read > 0);
close(prevlocal);
}
- unlink(prevfile);
+ unlink_or_warn(prevfile);
- /* Reset inflate/SHA1 if there was an error reading the previous temp
- file; also rewind to the beginning of the local file. */
+ /*
+ * Reset inflate/SHA1 if there was an error reading the previous temp
+ * file; also rewind to the beginning of the local file.
+ */
if (prev_read == -1) {
memset(&request->stream, 0, sizeof(request->stream));
git_inflate_init(&request->stream);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
- /* If we have successfully processed data from a previous fetch
- attempt, only fetch the data we don't already have. */
+ /*
+ * If we have successfully processed data from a previous fetch
+ * attempt, only fetch the data we don't already have.
+ */
if (prev_posn>0) {
if (push_verbosely)
fprintf(stderr,
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
slot->local = packfile;
- /* If there is data present from a previous transfer attempt,
- resume where it left off */
+ /*
+ * If there is data present from a previous transfer attempt,
+ * resume where it left off
+ */
prev_posn = ftell(packfile);
if (prev_posn>0) {
if (push_verbosely)
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &request->buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, request->buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
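+ /*
+  * When curl supports CURLOPT_IOCTLFUNCTION, register a callback so it
+  * can rewind the request buffer if it has to resend the body, e.g.
+  * after an authentication round trip.
+  */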
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &request->buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null);
curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT);
curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1);
struct stat st;
struct packed_git *target;
struct packed_git **lst;
+ struct active_request_slot *slot;
request->curl_result = request->slot->curl_result;
request->http_code = request->slot->http_code;
+ slot = request->slot;
request->slot = NULL;
/* Keep locks active */
aborted = 1;
}
} else if (request->state == RUN_FETCH_LOOSE) {
- fchmod(request->local_fileno, 0444);
- close(request->local_fileno); request->local_fileno = -1;
+ close(request->local_fileno);
+ request->local_fileno = -1;
if (request->curl_result != CURLE_OK &&
request->http_code != 416) {
if (stat(request->tmpfile, &st) == 0) {
if (st.st_size == 0)
- unlink(request->tmpfile);
+ unlink_or_warn(request->tmpfile);
}
} else {
if (request->http_code == 416)
git_inflate_end(&request->stream);
git_SHA1_Final(request->real_sha1, &request->c);
if (request->zret != Z_STREAM_END) {
- unlink(request->tmpfile);
+ unlink_or_warn(request->tmpfile);
} else if (hashcmp(request->obj->sha1, request->real_sha1)) {
- unlink(request->tmpfile);
+ unlink_or_warn(request->tmpfile);
} else {
request->rename =
move_temp_to_file(
request->tmpfile,
request->filename);
- if (request->rename == 0) {
+ if (request->rename == 0)
request->obj->flags |= (LOCAL | REMOTE);
- }
}
}
fclose(request->local_stream);
request->local_stream = NULL;
+ slot->local = NULL;
if (!move_temp_to_file(request->tmpfile,
request->filename)) {
target = (struct packed_git *)request->userData;
}
#ifdef USE_CURL_MULTI
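+/*
+ * Set while run_request_queue() is draining the queue; fill_active_slot()
+ * will not start new requests while it is clear.
+ */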
+static int is_running_queue;
static int fill_active_slot(void *unused)
{
struct transfer_request *request;
- if (aborted)
+ if (aborted || !is_running_queue)
return 0;
for (request = request_queue_head; request; request = request->next) {
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
slot->local = indexfile;
- /* If there is data present from a previous transfer attempt,
- resume where it left off */
+ /*
+ * If there is data present from a previous transfer attempt,
+ * resume where it left off
+ */
prev_posn = ftell(indexfile);
if (prev_posn>0) {
if (push_verbosely)
if (results.curl_result != CURLE_OK) {
free(url);
fclose(indexfile);
+ slot->local = NULL;
return error("Unable to get pack index %s\n%s", url,
curl_errorstr);
}
} else {
free(url);
fclose(indexfile);
+ slot->local = NULL;
return error("Unable to start request");
}
free(url);
fclose(indexfile);
+ slot->local = NULL;
return move_temp_to_file(tmpfile, filename);
}
struct remote_lock *lock = NULL;
struct curl_slist *dav_headers = NULL;
struct xml_ctx ctx;
+ char *escaped;
url = xmalloc(strlen(repo->url) + strlen(path) + 1);
sprintf(url, "%s%s", repo->url, path);
ep = strchr(ep + 1, '/');
}
- strbuf_addf(&out_buffer.buf, LOCK_REQUEST, git_default_email);
+ escaped = xml_entities(git_default_email);
+ strbuf_addf(&out_buffer.buf, LOCK_REQUEST, escaped);
+ free(escaped);
sprintf(timeout_header, "Timeout: Second-%ld", timeout);
dav_headers = curl_slist_append(dav_headers, timeout_header);
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &out_buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &out_buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
struct curl_slist *dav_headers = NULL;
struct xml_ctx ctx;
int lock_flags = 0;
+ char *escaped;
- strbuf_addf(&out_buffer.buf, PROPFIND_SUPPORTEDLOCK_REQUEST, repo->url);
+ escaped = xml_entities(repo->url);
+ strbuf_addf(&out_buffer.buf, PROPFIND_SUPPORTEDLOCK_REQUEST, escaped);
+ free(escaped);
dav_headers = curl_slist_append(dav_headers, "Depth: 0");
dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml");
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &out_buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
curl_easy_setopt(slot->curl, CURLOPT_URL, repo->url);
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &out_buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null);
curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
curl_easy_setopt(slot->curl, CURLOPT_INFILE, &buffer);
curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, buffer.buf.len);
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
+#ifndef NO_CURL_IOCTL
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer);
+ curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, &buffer);
+#endif
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null);
curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
return 0;
}
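+/*
+ * Process the queued requests until the queue is empty or the push is
+ * aborted.  With USE_CURL_MULTI, queued requests are only started while
+ * this loop is running.
+ */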
+static void run_request_queue(void)
+{
+#ifdef USE_CURL_MULTI
+ is_running_queue = 1;
+ fill_active_slots();
+ add_fill_function(NULL, fill_active_slot);
+#endif
+ do {
+ finish_all_active_slots();
+#ifdef USE_CURL_MULTI
+ fill_active_slots();
+#endif
+ } while (request_queue_head && !aborted);
+
+#ifdef USE_CURL_MULTI
+ is_running_queue = 0;
+#endif
+}
+
int main(int argc, char **argv)
{
struct transfer_request *request;
repo->url = rewritten_url;
}
+#ifdef USE_CURL_MULTI
+ is_running_queue = 0;
+#endif
+
/* Verify DAV compliance/lock support */
if (!locking_available()) {
rc = 1;
local_refs = get_local_heads();
fprintf(stderr, "Fetching remote heads...\n");
get_dav_remote_heads();
+ run_request_queue();
/* Remove a remote branch if -d or -D was specified */
if (delete_branch) {
if (objects_to_send)
fprintf(stderr, " sending %d objects\n",
objects_to_send);
-#ifdef USE_CURL_MULTI
- fill_active_slots();
- add_fill_function(NULL, fill_active_slot);
-#endif
- do {
- finish_all_active_slots();
-#ifdef USE_CURL_MULTI
- fill_active_slots();
-#endif
- } while (request_queue_head && !aborted);
+
+ run_request_queue();
/* Update the remote branch if all went well */
if (aborted || !update_remote(ref->new_sha1, ref_lock))