May 28 2019 11:44 AM
I have the following code, which opens a host file, writes one line at the beginning of the file, then closes the file. It performs much more slowly on a large file (175,000 bytes) than on a small file (35 bytes). The time is spent mostly in the CloseHandle() system call. Has anyone seen the same problem? Is it a Microsoft Windows bug? Any response is appreciated.
// filetest.c : Testing performance of file IO in Windows.
//
#include <Windows.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/timeb.h>
#include <time.h>
/* Fixed payload written once per logtofile() call; main() measures the
   cost of iterating this write against the target file. */
char *data = "The line to be put in messages.log\n";
int logtofile(char *filename, char *buf, int siz)
{
int j; /* timeout */
int nw;
HANDLE han;
j = 10;
do {
DWORD lasterror;
han = CreateFileA(filename,GENERIC_WRITE,
FILE_SHARE_READ,NULL,
OPEN_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL);
lasterror = GetLastError();
if (han == INVALID_HANDLE_VALUE) {
if (lasterror == ERROR_SHARING_VIOLATION) Sleep(5);
else {
printf("\n CreatFile Failed error=%d",lasterror);
return 0;
}
}
} while ((han == INVALID_HANDLE_VALUE) && --j);
if (han != INVALID_HANDLE_VALUE) {
j = WriteFile(han,buf,siz,&nw,NULL);
if (!j) {
printf("\nlogtofile error=%d",GetLastError());
}
}
j = CloseHandle(han);
return 0;
}
/*
 * Usage: filetest <fname> [iterations]
 *
 * Calls logtofile() `iterations` times (default 5000) against <fname>
 * and prints the elapsed wall-clock time as seconds + milliseconds.
 */
int main(int argc, char* argv[])
{
    char *nam;
    int iter, i, len;
    time_t sec;
    int msec; /* was declared twice (int msec at L41 and L43) — a compile error */
    struct timeb starttimeb, endtimeb;

    if (argc == 1) {
        printf("usage: filetest <fname> [iteration] ");
        return 0;
    }
    nam = argv[1];
    len = (int)strlen(data);
    iter = (argc > 2) ? atoi(argv[2]) : 5000; /* atoi: value is stored in an int anyway */

    ftime(&starttimeb);
    for (i = 0; i < iter; i++) {
        logtofile(nam, data, len);
    }
    ftime(&endtimeb);

    /* borrow a second when the end fractional milliseconds are smaller */
    sec = endtimeb.time - starttimeb.time;
    if (endtimeb.millitm < starttimeb.millitm) {
        sec--;
        msec = endtimeb.millitm + 1000 - starttimeb.millitm;
    } else {
        msec = endtimeb.millitm - starttimeb.millitm;
    }
    /* time_t width is platform-dependent; cast so %lld matches its argument */
    printf("\nSecond: %lld, millisecond: %d\n", (long long)sec, msec);
    return 0;
}
Dec 19 2019 04:53 AM