DragonFly submit List (threaded) for 2003-10
binary nvidia driver kld patch and help needed
Hi,
I spent a rainy afternoon trying to get the binary nvidia drivers to work on
DragonFly. Of course, the thing wouldn't compile out of the box, so I tried
hacking it so it would compile and hopefully work. It doesn't work _yet_,
but at least the damn thing loads and detects my nvidia card. X can't do
diddly squat with it though, which might be my bad.
Anyway, what I changed:
- changed cdevsw so it has the DragonFly field order (see the sketch of the
resulting layout below)
- changed occurrences of d_thread_t to struct thread
- got rid of some macros that are no longer needed (__TD_FDT and __TD_FDT_CNT)
- tried to get rid of a PZERO (not sure I did that right).
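For reference, this is the field order the patched cdevsw initializers end up
with, pieced together from the nvidia_ctl.c hunk in the diff. I copied the
layout and the field-name comments from the module I used as a reference, so
take the names with a grain of salt:

/*
 * Rough shape of the DragonFly-order cdevsw initializer as used in the
 * patch; the field-name comments come from another module's source and
 * may not match sys/conf.h exactly.
 */
static struct cdevsw nvidia_ctl_cdevsw = {
	/* name */	"nvidiactl",
	/* maj */	CDEV_MAJOR,
	/* flags */	D_TRACKCLOSE,
	/* port */	NULL,
	/* autoq */	0,

	/* open */	nvidia_ctl_open,
	/* close */	nvidia_ctl_close,
	/* read */	noread,
	/* ... write and ioctl entries unchanged from the stock driver ... */
	/* poll */	nvidia_ctl_poll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* dump */	nodump,
	/* psize */	nopsize
};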
There are two things that I suspect are the reason why X doesn't work yet.
The rm_ioctl call in src/nvidia_subr.c gives a compiler warning. I don't
know _what_ rm_ioctl does or wants, since it is buried in the binary-only
object nvidia so kindly provides without source (grmbl). Also, I'm not sure
I got rid of the PZERO in src/nvidia_ctl.c correctly.
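For reference, the call the warning is about, before and after my change
(the full context is in the nvidia_subr.c hunk of the diff):

/* stock FreeBSD driver: */
if (rm_ioctl(nv, __TD_FDT(td), __IOC_NR(cmd), data))
	return 0;

/*
 * my DragonFly attempt; rm_ioctl's real prototype is hidden in the
 * binary blob, so this argument list is a guess:
 */
if (rm_ioctl(nv, p, fdp, data))
	return 0;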
I'm not a kernel/module hacker, and I used the source of another module as a
reference (for the cdevsw order mostly), so I might have gone about it in
completely the wrong way. But the thing compiles, loads/unloads without
crashing the kernel, and reports the existence of my GeForce 4 MX
correctly.
Could anyone with both an nvidia card and more kernel hacking knowledge than
myself (the latter is not a tough requirement) please take a look at it and
tell me where I fscked up and how to fix it?
It would be nice to have hardware-accelerated OpenGL in X11 on DragonFly. So
if someone could look at the drm-kmod port, that would be pretty cool too.
Cheers,
Emiel
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nv-freebsd.h ./src/nv-freebsd.h
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nv-freebsd.h Wed May 28 18:51:53 2003
+++ ./src/nv-freebsd.h Thu Oct 9 09:21:10 2003
@@ -75,12 +75,12 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
-#include <pci/agpvar.h>
+#include <dev/agp/agpvar.h>
#include <sys/agpio.h>
#include "net/if.h"
-#include "machine/../linux/linux.h"
-#include "compat/linux/linux_ioctl.h"
+#include "emulation/linux/i386/linux.h"
+#include "emulation/linux/linux_ioctl.h"
#if __FreeBSD_version >= 500000
#include <sys/mutex.h>
@@ -97,13 +97,10 @@
* on FreeBSD, due to its thread-aware reference counting.
*/
-#define __TD_FDT(td) ((td)->td_proc->p_fd)
-#define __TD_FDT_CNT(td) ((td)->td_proc->p_fd->fd_refcnt)
-
#else
#include <machine/smp.h>
-#include <pci/pcireg.h>
-#include <pci/pcivar.h>
+#include <bus/pci/pcireg.h>
+#include <bus/pci/pcivar.h>
#define vm_page_lock_queues()
#define vm_page_unlock_queues()
@@ -116,8 +113,6 @@
* on FreeBSD, due to its thread-aware reference counting.
*/
-#define __TD_FDT(td) ((td)->p_fd)
-#define __TD_FDT_CNT(td) ((td)->p_fd->fd_refcnt)
#endif
@@ -302,7 +297,7 @@
void nv_sysctl_exit (nv_state_t *);
/* nvidia_linux.c */
-int linux_ioctl_nvidia (d_thread_t *, struct linux_ioctl_args *);
+int linux_ioctl_nvidia (struct thread *, struct linux_ioctl_args *);
void nvidia_linux_init (void);
void nvidia_linux_exit (void);
@@ -310,14 +305,14 @@
int nvidia_set_primary (struct nv_ioctl_primary_card *);
int nvidia_get_card_info (struct nv_ioctl_card_info *);
int nvidia_get_api_version (struct nv_ioctl_rm_api_version *);
-int nvidia_handle_ioctl (dev_t, u_long, caddr_t, int, d_thread_t *);
+int nvidia_handle_ioctl (dev_t, u_long, caddr_t, int, struct thread *);
/* device helpers */
int nvidia_open_ctl (void);
int nvidia_open_dev (struct nvidia_softc *);
-int nvidia_close_ctl (dev_t, d_thread_t *);
-int nvidia_close_dev (struct nvidia_softc *, dev_t, d_thread_t *);
+int nvidia_close_ctl (dev_t, struct thread *);
+int nvidia_close_dev (struct nvidia_softc *, dev_t, struct thread *);
int nvidia_mmap_dev (struct nvidia_softc *, vm_offset_t, vm_offset_t *);
#endif /* __NV_FREEBSD_H__ */
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_ctl.c ./src/nvidia_ctl.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_ctl.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_ctl.c Thu Oct 9 09:15:41 2003
@@ -20,6 +20,12 @@
static struct cdevsw nvidia_ctl_cdevsw = {
#if __FreeBSD_version < 500105
+ /* name */ "nvidiactl",
+ /* maj */ CDEV_MAJOR,
+ /* flags */ D_TRACKCLOSE,
+ /* port */ NULL,
+ /* autoq */ 0,
+
/* open */ nvidia_ctl_open,
/* close */ nvidia_ctl_close,
/* read */ noread,
@@ -28,15 +34,8 @@
/* poll */ nvidia_ctl_poll,
/* mmap */ nommap,
/* strategy */ nostrategy,
- /* name */ "nvidiactl",
- /* maj */ CDEV_MAJOR,
/* dump */ nodump,
- /* psize */ nopsize,
- /* flags */ D_TRACKCLOSE,
-#if __FreeBSD_version < 500000
- /* bmaj */ -1,
-#endif
- /* kqfilter */ NULL,
+ /* psize */ nopsize
#else
.d_open = nvidia_ctl_open,
.d_close = nvidia_ctl_close,
@@ -56,7 +55,7 @@
dev_t dev,
int oflags,
int devtype,
- d_thread_t *td
+ struct thread *td
)
{
int status;
@@ -73,7 +72,7 @@
dev_t dev,
int fflag,
int devtype,
- d_thread_t *td
+ struct thread *td
)
{
int status;
@@ -91,7 +90,7 @@
u_long cmd,
caddr_t data,
int fflag,
- d_thread_t *td
+ struct thread *td
)
{
int status = 0;
@@ -163,7 +162,7 @@
int nvidia_ctl_poll(
dev_t dev,
int events,
- d_thread_t *td
+ struct thread *td
)
{
nv_state_t *nv;
@@ -218,7 +217,7 @@
mtx_init(&sc->mtx_api, "ctl.mtx_api", NULL, MTX_DEF);
mtx_init(&sc->mtx_rm, "ctl.mtx_rm", NULL, MTX_SPIN);
#else
- lockinit(&sc->api_lock, PZERO, "ctl.api_lock", 0, 0);
+ lockinit(&sc->api_lock, 0, "ctl.api_lock", 0, 0);
#endif
}
@@ -243,7 +242,7 @@
mtx_destroy(&sc->mtx_rm);
mtx_destroy(&sc->mtx_api);
#else
- lockmgr(&sc->api_lock, LK_DRAIN, 0, curproc);
+ lockmgr(&sc->api_lock, LK_DRAIN, 0, curthread);
#endif
destroy_dev(nvidia_ctl_cdev);
}
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_dev.c ./src/nvidia_dev.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_dev.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_dev.c Thu Oct 9 09:14:38 2003
@@ -21,6 +21,12 @@
static struct cdevsw nvidia_dev_cdevsw = {
#if __FreeBSD_version < 500105
+ /* name */ "nvidia",
+ /* maj */ CDEV_MAJOR,
+ /* flags */ D_MEM|D_TRACKCLOSE,
+ /* port */ NULL,
+ /* autoq */ 0,
+
/* open */ nvidia_dev_open,
/* close */ nvidia_dev_close,
/* read */ noread,
@@ -29,15 +35,8 @@
/* poll */ nvidia_dev_poll,
/* mmap */ nvidia_dev_mmap,
/* strategy */ nostrategy,
- /* name */ "nvidia",
- /* maj */ CDEV_MAJOR,
/* dump */ nodump,
/* psize */ nopsize,
- /* flags */ D_MEM|D_TRACKCLOSE,
-#if __FreeBSD_version < 500000
- /* bmaj */ -1,
-#endif
- /* kqfilter */ NULL,
#else
.d_open = nvidia_dev_open,
.d_close = nvidia_dev_close,
@@ -54,7 +53,7 @@
dev_t dev,
int oflags,
int devtype,
- d_thread_t *td
+ struct thread *td
)
{
nv_state_t *nv;
@@ -86,7 +85,7 @@
dev_t dev,
int fflag,
int devtype,
- d_thread_t *td
+ struct thread *td
)
{
nv_state_t *nv;
@@ -110,7 +109,7 @@
u_long cmd,
caddr_t data,
int fflag,
- d_thread_t *td
+ struct thread *td
)
{
nv_state_t *nv;
@@ -132,7 +131,7 @@
int nvidia_dev_poll(
dev_t dev,
int events,
- d_thread_t *td
+ struct thread *td
)
{
/*
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_linux.c ./src/nvidia_linux.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_linux.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_linux.c Thu Oct 9 09:24:44 2003
@@ -18,10 +18,12 @@
#ifdef NV_SUPPORT_LINUX_COMPAT
int linux_ioctl_nvidia(
- d_thread_t *td,
+ struct thread *td,
struct linux_ioctl_args *args
)
{
+ struct proc *p = td->td_proc;
+
/*
* The range has already been checked, and the native NVIDIA ioctl()
* implementation will throw out any commands it does not recognize.
@@ -33,7 +35,7 @@
* copy the user data in/out correctly.
*/
- return (ioctl(td, (struct ioctl_args *) args));
+ return 0;
}
struct linux_ioctl_handler nvidia_handler = {
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_os.c ./src/nvidia_os.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_os.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_os.c Thu Oct 9 09:10:30 2003
@@ -164,11 +164,7 @@
BOOL os_is_administrator(PHWINFO pDev)
{
-#if __FreeBSD_version < 500000
- return suser(curproc);
-#else
return suser(curthread);
-#endif
}
U008 os_io_read_byte(
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_pci.c ./src/nvidia_pci.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_pci.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_pci.c Thu Oct 9 09:10:30 2003
@@ -96,7 +96,7 @@
mtx_init(&sc->mtx_api, "dev.mtx_api", NULL, MTX_DEF);
mtx_init(&sc->mtx_rm, "dev.mtx_rm", NULL, MTX_SPIN);
#else
- lockinit(&sc->api_lock, PZERO, "dev.api_lock", 0, 0);
+ lockinit(&sc->api_lock, 0, "dev.api_lock", 0, 0);
#endif
return 0;
@@ -115,7 +115,7 @@
mtx_destroy(&sc->mtx_rm);
mtx_destroy(&sc->mtx_api);
#else
- lockmgr(&sc->api_lock, LK_DRAIN, 0, curproc);
+ lockmgr(&sc->api_lock, LK_DRAIN, 0, curthread);
#endif
status = nvidia_detach(dev);
diff -urN ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_subr.c ./src/nvidia_subr.c
--- ../NVIDIA-FreeBSD-x86-1.0-4365/src/nvidia_subr.c Wed May 28 18:51:52 2003
+++ ./src/nvidia_subr.c Thu Oct 9 09:29:17 2003
@@ -297,12 +297,14 @@
u_long cmd,
caddr_t data,
int fflag,
- d_thread_t *td
+ struct thread *td
)
{
struct nvidia_softc *sc;
nv_state_t *nv;
int unit = minor(dev);
+ struct proc *p = td->td_proc;
+ struct filedesc *fdp = p->p_fd;
if (unit == CDEV_CTL_MINOR) {
/* the control device is "special" */
@@ -314,7 +316,7 @@
nv = sc->nv_state;
}
- if (rm_ioctl(nv, __TD_FDT(td), __IOC_NR(cmd), data))
+ if (rm_ioctl(nv, p, fdp, data))
return 0;
return EINVAL;
@@ -338,18 +340,20 @@
int nvidia_close_ctl(
dev_t dev,
- d_thread_t *td
+ struct thread *td
)
{
+ struct proc *p = td->td_proc;
+ struct filedesc *fdp = p->p_fd;
nv_state_t *nv = &nvidia_ctl_state;
- if (__TD_FDT_CNT(td) == 0) {
+ if (fdp->fd_refcnt == 0) {
/*
* The global open/close count allows us to determine when we can
* safely reset this device; tracking of resource manager clients
* is more involved. Hopefully, this solution works as expected.
*/
- rm_free_unused_clients(nv, 0, __TD_FDT(td));
+ rm_free_unused_clients(nv, 0, fdp);
}
nv->usage_count--;
@@ -391,18 +395,20 @@
int nvidia_close_dev(
struct nvidia_softc *sc,
dev_t dev,
- d_thread_t *td
+ struct thread *td
)
{
+ struct proc *p = td->td_proc;
+ struct filedesc *fdp = p->p_fd;
nv_state_t *nv = sc->nv_state;
- if (__TD_FDT_CNT(td) == 0) {
+ if (fdp->fd_refcnt == 0) {
/*
* The global open/close count allows us to determine when we can
* safely reset this device; tracking of resource manager clients
* is more involved. Hopefully, this solution works as expected.
*/
- rm_free_unused_clients(nv, 0, __TD_FDT(td));
+ rm_free_unused_clients(nv, 0, fdp);
}
nv->usage_count--;
@@ -762,7 +768,7 @@
* of our system calls at a time.
*/
struct nvidia_softc *sc = nv->os_state;
- lockmgr(&sc->api_lock, LK_EXCLUSIVE, 0, curproc);
+ lockmgr(&sc->api_lock, LK_EXCLUSIVE, 0, curthread);
#endif
}
@@ -781,7 +787,7 @@
* its system call.
*/
struct nvidia_softc *sc = nv->os_state;
- lockmgr(&sc->api_lock, LK_RELEASE, 0, curproc);
+ lockmgr(&sc->api_lock, LK_RELEASE, 0, curthread);
#endif
}