author     Daniel Stenberg <daniel@haxx.se>  2002-05-13 07:28:10 +0000
committer  Daniel Stenberg <daniel@haxx.se>  2002-05-13 07:28:10 +0000
commit     1913b4eeed8fde06b544d883c3b599c71ca47f5f (patch)
tree       8ed863dd75aa873e92fed6915acb50ad8c25a11d
parent     b44a4da5df3a0ba5623b5dc3c72a31bf60cea464 (diff)
download   gnurl-1913b4eeed8fde06b544d883c3b599c71ca47f5f.tar.gz
           gnurl-1913b4eeed8fde06b544d883c3b599c71ca47f5f.tar.bz2
           gnurl-1913b4eeed8fde06b544d883c3b599c71ca47f5f.zip
fopen.c added, a fopen() style emulation for URL reading
-rw-r--r--  docs/examples/Makefile.am   11
-rw-r--r--  docs/examples/fopen.c      222
2 files changed, 228 insertions, 5 deletions
diff --git a/docs/examples/Makefile.am b/docs/examples/Makefile.am
index 13634302e..c0c82f3b5 100644
--- a/docs/examples/Makefile.am
+++ b/docs/examples/Makefile.am
@@ -4,11 +4,12 @@
AUTOMAKE_OPTIONS = foreign no-dependencies
-EXTRA_DIST = README curlgtk.c sepheaders.c simple.c postit2.c \
- win32sockets.c persistant.c ftpget.c Makefile.example \
- multithread.c getinmemory.c ftpupload.c httpput.c \
- simplessl.c ftpgetresp.c http-post.c post-callback.c \
- multi-app.c multi-double.c multi-single.c multi-post.c
+EXTRA_DIST = README curlgtk.c sepheaders.c simple.c postit2.c \
+ win32sockets.c persistant.c ftpget.c Makefile.example \
+ multithread.c getinmemory.c ftpupload.c httpput.c \
+ simplessl.c ftpgetresp.c http-post.c post-callback.c \
+ multi-app.c multi-double.c multi-single.c multi-post.c \
+ fopen.c
all:
@echo "done"
diff --git a/docs/examples/fopen.c b/docs/examples/fopen.c
new file mode 100644
index 000000000..a60d10334
--- /dev/null
+++ b/docs/examples/fopen.c
@@ -0,0 +1,222 @@
+/*****************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * $Id$
+ *
+ * This example source code introduces an fopen()/fread()/fclose() emulation
+ * for URL reads. Using an approach similar to this, you could replace your
+ * program's fopen() with this url_fopen() and fread() with url_fread() and
+ * it should be possible to read remote streams instead of (only) local files.
+ *
+ * See the main() function at the bottom that shows a tiny app in action.
+ *
+ * This source code is a proof of concept. It will need further attention to
+ * become useful and solid enough for production use.
+ *
+ * This example requires libcurl 7.9.7 or later.
+ */
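+
+/* Illustration only (not part of the original example): a plain local-file
+ * read loop such as
+ *
+ *   FILE *f = fopen("data.txt", "r");
+ *   while((n = fread(buf, 1, sizeof(buf), f)) > 0)
+ *     use(buf, n);
+ *   fclose(f);
+ *
+ * maps one-to-one onto url_fopen()/url_fread()/url_fclose() below; "data.txt",
+ * use() and the variable names here are hypothetical. */
+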
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include <curl/curl.h>
+#include <curl/types.h>
+#include <curl/easy.h>
+
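+/* per-stream state: the underlying handle (a curl easy handle or a plain
+   FILE), a buffer of data received from libcurl and a read cursor into it */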
+struct data {
+ int type;
+ union {
+ CURL *curl;
+ FILE *file;
+ } handle;
+
+ /* TODO: We should perhaps document the biggest possible buffer chunk we can
+ get from libcurl in one single callback... */
+ char buffer[CURL_MAX_WRITE_SIZE];
+
+ char *readptr; /* read from here */
+ int bytes; /* bytes available from read pointer */
+
+ CURLMcode m; /* stored from a previous url_fread() */
+};
+
+typedef struct data URL_FILE;
+
+/* we use a global one for convenience */
+CURLM *multi_handle;
+
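+/* libcurl hands received data to this callback; copy it into the URL_FILE
+   buffer at the current read pointer so url_fread() can return it later */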
+static
+size_t write_callback(char *buffer,
+ size_t size,
+ size_t nitems,
+ void *userp)
+{
+ URL_FILE *url = (URL_FILE *)userp;
+ size *= nitems;
+
+ memcpy(url->readptr, buffer, size);
+ url->readptr += size;
+ url->bytes += size;
+
+ return size;
+}
+
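+/* emulates fopen(): create an easy handle for the URL, add it to the global
+   multi handle and kick off the transfer */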
+URL_FILE *url_fopen(char *url, char *operation)
+{
+ /* this code could check for URLs or types in the 'url' and
+ basically use the real fopen() for standard files */
+
+ URL_FILE *file;
+ int still_running;
+
+ file = (URL_FILE *)malloc(sizeof(URL_FILE));
+ if(!file)
+ return NULL;
+
+ memset(file, 0, sizeof(URL_FILE));
+
+ file->type = 1; /* marked as URL, use 0 for plain file */
+ file->handle.curl = curl_easy_init();
+
+ curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
+ curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
+ curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0); /* no verbose output */
+ curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
+
+ if(!multi_handle)
+ multi_handle = curl_multi_init();
+
+ curl_multi_add_handle(multi_handle, file->handle.curl);
+
+ while(CURLM_CALL_MULTI_PERFORM ==
+ curl_multi_perform(multi_handle, &still_running));
+
+ /* if still_running is 0 at this point, we should really return NULL */
+
+ return file;
+}
+
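+/* emulates fclose(): detach the easy handle from the multi handle and free
+   all resources held for this stream */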
+void url_fclose(URL_FILE *file)
+{
+ /* make sure the easy handle is not in the multi handle anymore */
+ curl_multi_remove_handle(multi_handle, file->handle.curl);
+
+ /* cleanup */
+ curl_easy_cleanup(file->handle.curl);
+
+ free(file); /* url_fopen() allocated this */
+}
+
+
+
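+/* emulates fread(): when the internal buffer is empty, wait with select() and
+   drive curl_multi_perform() until more data has arrived, then copy at most
+   size*nmemb bytes from the buffer to the caller */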
+size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
+{
+ fd_set fdread;
+ fd_set fdwrite;
+ fd_set fdexcep;
+ int maxfd;
+ struct timeval timeout;
+ int rc;
+ int still_running = 0;
+
+ if(!file->bytes) { /* no data available at this point */
+
+ file->readptr = file->buffer; /* reset read pointer */
+
+ if(CURLM_CALL_MULTI_PERFORM == file->m) {
+ while(CURLM_CALL_MULTI_PERFORM ==
+ curl_multi_perform(multi_handle, &still_running)) {
+ if(file->bytes) {
+ printf("(fread) WOAH! This happened!\n");
+ break;
+ }
+ }
+ if(!still_running) {
+ printf("NO MORE RUNNING AROUND!\n");
+ return 0;
+ }
+ }
+
+ FD_ZERO(&fdread);
+ FD_ZERO(&fdwrite);
+ FD_ZERO(&fdexcep);
+
+ /* set a suitable timeout to fail on */
+ timeout.tv_sec = 500; /* give up after 500 seconds */
+ timeout.tv_usec = 0;
+
+ /* get file descriptors from the transfers */
+ curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
+
+ rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
+
+ switch(rc) {
+ case -1:
+ /* select error */
+ break;
+ case 0:
+ break;
+ default:
+ /* timeout or readable/writable sockets */
+ do {
+ file->m = curl_multi_perform(multi_handle, &still_running);
+
+ if(file->bytes)
+ /* we have received data, return that now */
+ break;
+
+ } while(CURLM_CALL_MULTI_PERFORM == file->m);
+
+
+ if(!still_running)
+ printf("NO MORE RUNNING AROUND!\n");
+
+ break;
+ }
+ }
+ else
+ printf("(fread) Skip network read\n");
+
+ if(file->bytes) {
+ /* data already available, return that */
+ int want = size * nmemb;
+
+ if(file->bytes < want)
+ want = file->bytes;
+
+ memcpy(ptr, file->readptr, want);
+ file->readptr += want;
+ file->bytes -= want;
+
+ printf("(fread) return %d bytes\n", want);
+
+ return want;
+ }
+ return 0; /* no data available to return */
+}
+
+
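+/* tiny test app: fetch http://www.haxx.se through the emulation layer and
+   print how many bytes each url_fread() call returns */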
+int main(int argc, char *argv[])
+{
+ URL_FILE *handle;
+ int nread;
+ char buffer[256];
+
+ handle = url_fopen("http://www.haxx.se", "r");
+
+ if(!handle) {
+ printf("couldn't url_fopen()\n");
+ return 1; /* no handle, nothing to read from */
+ }
+
+ do {
+ nread = url_fread(buffer, sizeof(buffer), 1, handle);
+
+ printf("We got: %d bytes\n", nread);
+ } while(nread);
+
+ url_fclose(handle);
+
+ return 0;
+}